# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.nets.alexnet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import tf_slim as slim
from nets import alexnet
class AlexnetV2Test(tf.test.TestCase):
def testBuild(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.test_session():
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = alexnet.alexnet_v2(inputs, num_classes)
      self.assertEqual(logits.op.name, 'alexnet_v2/fc8/squeezed')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
def testFullyConvolutional(self):
batch_size = 1
height, width = 300, 400
num_classes = 1000
with self.test_session():
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = alexnet.alexnet_v2(inputs, num_classes, spatial_squeeze=False)
      self.assertEqual(logits.op.name, 'alexnet_v2/fc8/BiasAdd')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, 4, 7, num_classes])
def testGlobalPool(self):
batch_size = 1
height, width = 256, 256
num_classes = 1000
with self.test_session():
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = alexnet.alexnet_v2(inputs, num_classes, spatial_squeeze=False,
global_pool=True)
      self.assertEqual(logits.op.name, 'alexnet_v2/fc8/BiasAdd')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, 1, 1, num_classes])
def testEndPoints(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.test_session():
inputs = tf.random.uniform((batch_size, height, width, 3))
_, end_points = alexnet.alexnet_v2(inputs, num_classes)
expected_names = ['alexnet_v2/conv1',
'alexnet_v2/pool1',
'alexnet_v2/conv2',
'alexnet_v2/pool2',
'alexnet_v2/conv3',
'alexnet_v2/conv4',
'alexnet_v2/conv5',
'alexnet_v2/pool5',
'alexnet_v2/fc6',
'alexnet_v2/fc7',
'alexnet_v2/fc8'
]
self.assertSetEqual(set(end_points.keys()), set(expected_names))
def testNoClasses(self):
batch_size = 5
height, width = 224, 224
num_classes = None
with self.test_session():
inputs = tf.random.uniform((batch_size, height, width, 3))
net, end_points = alexnet.alexnet_v2(inputs, num_classes)
expected_names = ['alexnet_v2/conv1',
'alexnet_v2/pool1',
'alexnet_v2/conv2',
'alexnet_v2/pool2',
'alexnet_v2/conv3',
'alexnet_v2/conv4',
'alexnet_v2/conv5',
'alexnet_v2/pool5',
'alexnet_v2/fc6',
'alexnet_v2/fc7'
]
self.assertSetEqual(set(end_points.keys()), set(expected_names))
self.assertTrue(net.op.name.startswith('alexnet_v2/fc7'))
self.assertListEqual(net.get_shape().as_list(),
[batch_size, 1, 1, 4096])
def testModelVariables(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.test_session():
inputs = tf.random.uniform((batch_size, height, width, 3))
alexnet.alexnet_v2(inputs, num_classes)
expected_names = ['alexnet_v2/conv1/weights',
'alexnet_v2/conv1/biases',
'alexnet_v2/conv2/weights',
'alexnet_v2/conv2/biases',
'alexnet_v2/conv3/weights',
'alexnet_v2/conv3/biases',
'alexnet_v2/conv4/weights',
'alexnet_v2/conv4/biases',
'alexnet_v2/conv5/weights',
'alexnet_v2/conv5/biases',
'alexnet_v2/fc6/weights',
'alexnet_v2/fc6/biases',
'alexnet_v2/fc7/weights',
'alexnet_v2/fc7/biases',
'alexnet_v2/fc8/weights',
'alexnet_v2/fc8/biases',
]
model_variables = [v.op.name for v in slim.get_model_variables()]
self.assertSetEqual(set(model_variables), set(expected_names))
def testEvaluation(self):
batch_size = 2
height, width = 224, 224
num_classes = 1000
with self.test_session():
eval_inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = alexnet.alexnet_v2(eval_inputs, is_training=False)
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
predictions = tf.argmax(input=logits, axis=1)
self.assertListEqual(predictions.get_shape().as_list(), [batch_size])
def testTrainEvalWithReuse(self):
train_batch_size = 2
eval_batch_size = 1
train_height, train_width = 224, 224
eval_height, eval_width = 300, 400
num_classes = 1000
with self.test_session():
train_inputs = tf.random.uniform(
(train_batch_size, train_height, train_width, 3))
logits, _ = alexnet.alexnet_v2(train_inputs)
self.assertListEqual(logits.get_shape().as_list(),
[train_batch_size, num_classes])
tf.get_variable_scope().reuse_variables()
eval_inputs = tf.random.uniform(
(eval_batch_size, eval_height, eval_width, 3))
logits, _ = alexnet.alexnet_v2(eval_inputs, is_training=False,
spatial_squeeze=False)
self.assertListEqual(logits.get_shape().as_list(),
[eval_batch_size, 4, 7, num_classes])
logits = tf.reduce_mean(input_tensor=logits, axis=[1, 2])
predictions = tf.argmax(input=logits, axis=1)
      self.assertEqual(predictions.get_shape().as_list(), [eval_batch_size])
def testForward(self):
batch_size = 1
height, width = 224, 224
with self.test_session() as sess:
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = alexnet.alexnet_v2(inputs)
sess.run(tf.global_variables_initializer())
output = sess.run(logits)
self.assertTrue(output.any())
if __name__ == '__main__':
tf.test.main()
# ----- end of slim/nets/alexnet_test.py (package: 123-object-detection) -----
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.nets.overfeat."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import tf_slim as slim
from nets import overfeat
class OverFeatTest(tf.test.TestCase):
def testBuild(self):
batch_size = 5
height, width = 231, 231
num_classes = 1000
with self.test_session():
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = overfeat.overfeat(inputs, num_classes)
      self.assertEqual(logits.op.name, 'overfeat/fc8/squeezed')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
def testFullyConvolutional(self):
batch_size = 1
height, width = 281, 281
num_classes = 1000
with self.test_session():
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = overfeat.overfeat(inputs, num_classes, spatial_squeeze=False)
      self.assertEqual(logits.op.name, 'overfeat/fc8/BiasAdd')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, 2, 2, num_classes])
def testGlobalPool(self):
batch_size = 1
height, width = 281, 281
num_classes = 1000
with self.test_session():
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = overfeat.overfeat(inputs, num_classes, spatial_squeeze=False,
global_pool=True)
      self.assertEqual(logits.op.name, 'overfeat/fc8/BiasAdd')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, 1, 1, num_classes])
def testEndPoints(self):
batch_size = 5
height, width = 231, 231
num_classes = 1000
with self.test_session():
inputs = tf.random.uniform((batch_size, height, width, 3))
_, end_points = overfeat.overfeat(inputs, num_classes)
expected_names = ['overfeat/conv1',
'overfeat/pool1',
'overfeat/conv2',
'overfeat/pool2',
'overfeat/conv3',
'overfeat/conv4',
'overfeat/conv5',
'overfeat/pool5',
'overfeat/fc6',
'overfeat/fc7',
'overfeat/fc8'
]
self.assertSetEqual(set(end_points.keys()), set(expected_names))
def testNoClasses(self):
batch_size = 5
height, width = 231, 231
num_classes = None
with self.test_session():
inputs = tf.random.uniform((batch_size, height, width, 3))
net, end_points = overfeat.overfeat(inputs, num_classes)
expected_names = ['overfeat/conv1',
'overfeat/pool1',
'overfeat/conv2',
'overfeat/pool2',
'overfeat/conv3',
'overfeat/conv4',
'overfeat/conv5',
'overfeat/pool5',
'overfeat/fc6',
'overfeat/fc7'
]
self.assertSetEqual(set(end_points.keys()), set(expected_names))
self.assertTrue(net.op.name.startswith('overfeat/fc7'))
def testModelVariables(self):
batch_size = 5
height, width = 231, 231
num_classes = 1000
with self.test_session():
inputs = tf.random.uniform((batch_size, height, width, 3))
overfeat.overfeat(inputs, num_classes)
expected_names = ['overfeat/conv1/weights',
'overfeat/conv1/biases',
'overfeat/conv2/weights',
'overfeat/conv2/biases',
'overfeat/conv3/weights',
'overfeat/conv3/biases',
'overfeat/conv4/weights',
'overfeat/conv4/biases',
'overfeat/conv5/weights',
'overfeat/conv5/biases',
'overfeat/fc6/weights',
'overfeat/fc6/biases',
'overfeat/fc7/weights',
'overfeat/fc7/biases',
'overfeat/fc8/weights',
'overfeat/fc8/biases',
]
model_variables = [v.op.name for v in slim.get_model_variables()]
self.assertSetEqual(set(model_variables), set(expected_names))
def testEvaluation(self):
batch_size = 2
height, width = 231, 231
num_classes = 1000
with self.test_session():
eval_inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = overfeat.overfeat(eval_inputs, is_training=False)
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
predictions = tf.argmax(input=logits, axis=1)
self.assertListEqual(predictions.get_shape().as_list(), [batch_size])
def testTrainEvalWithReuse(self):
train_batch_size = 2
eval_batch_size = 1
train_height, train_width = 231, 231
eval_height, eval_width = 281, 281
num_classes = 1000
with self.test_session():
train_inputs = tf.random.uniform(
(train_batch_size, train_height, train_width, 3))
logits, _ = overfeat.overfeat(train_inputs)
self.assertListEqual(logits.get_shape().as_list(),
[train_batch_size, num_classes])
tf.get_variable_scope().reuse_variables()
eval_inputs = tf.random.uniform(
(eval_batch_size, eval_height, eval_width, 3))
logits, _ = overfeat.overfeat(eval_inputs, is_training=False,
spatial_squeeze=False)
self.assertListEqual(logits.get_shape().as_list(),
[eval_batch_size, 2, 2, num_classes])
logits = tf.reduce_mean(input_tensor=logits, axis=[1, 2])
predictions = tf.argmax(input=logits, axis=1)
      self.assertEqual(predictions.get_shape().as_list(), [eval_batch_size])
def testForward(self):
batch_size = 1
height, width = 231, 231
with self.test_session() as sess:
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = overfeat.overfeat(inputs)
sess.run(tf.global_variables_initializer())
output = sess.run(logits)
self.assertTrue(output.any())
if __name__ == '__main__':
tf.test.main()
# ----- end of slim/nets/overfeat_test.py (package: 123-object-detection) -----
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for mobilenet_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from six.moves import range
import tensorflow.compat.v1 as tf
import tf_slim as slim
from nets.mobilenet import conv_blocks as ops
from nets.mobilenet import mobilenet
from nets.mobilenet import mobilenet_v2
def find_ops(optype):
"""Find ops of a given type in graphdef or a graph.
Args:
optype: operation type (e.g. Conv2D)
Returns:
List of operations.
"""
gd = tf.get_default_graph()
return [var for var in gd.get_operations() if var.type == optype]
class MobilenetV2Test(tf.test.TestCase):
def testCreation(self):
spec = dict(mobilenet_v2.V2_DEF)
_, ep = mobilenet.mobilenet(
tf.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=spec)
num_convs = len(find_ops('Conv2D'))
# This is mostly a sanity test. No deep reason for these particular
# constants.
#
    # All but the first 2 and the last one have two convolutions each, and
    # there is one extra conv that is not in the spec (the logits layer).
self.assertEqual(num_convs, len(spec['spec']) * 2 - 2)
# Check that depthwise are exposed.
for i in range(2, 17):
self.assertIn('layer_%d/depthwise_output' % i, ep)
def testCreationNoClasses(self):
spec = copy.deepcopy(mobilenet_v2.V2_DEF)
net, ep = mobilenet.mobilenet(
tf.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=spec,
num_classes=None)
self.assertIs(net, ep['global_pool'])
def testImageSizes(self):
for input_size, output_size in [(224, 7), (192, 6), (160, 5),
(128, 4), (96, 3)]:
tf.reset_default_graph()
_, ep = mobilenet_v2.mobilenet(
tf.placeholder(tf.float32, (10, input_size, input_size, 3)))
self.assertEqual(ep['layer_18/output'].get_shape().as_list()[1:3],
[output_size] * 2)
def testWithSplits(self):
spec = copy.deepcopy(mobilenet_v2.V2_DEF)
spec['overrides'] = {
(ops.expanded_conv,): dict(split_expansion=2),
}
_, _ = mobilenet.mobilenet(
tf.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=spec)
num_convs = len(find_ops('Conv2D'))
    # All but 3 ops have 3 conv operators; the remaining 3 have one,
    # and there is one unaccounted for.
self.assertEqual(num_convs, len(spec['spec']) * 3 - 5)
def testWithOutputStride8(self):
out, _ = mobilenet.mobilenet_base(
tf.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=mobilenet_v2.V2_DEF,
output_stride=8,
scope='MobilenetV2')
self.assertEqual(out.get_shape().as_list()[1:3], [28, 28])
def testDivisibleBy(self):
tf.reset_default_graph()
mobilenet_v2.mobilenet(
tf.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=mobilenet_v2.V2_DEF,
divisible_by=16,
min_depth=32)
s = [op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')]
s = set(s)
self.assertSameElements([32, 64, 96, 160, 192, 320, 384, 576, 960, 1280,
1001], s)
def testDivisibleByWithArgScope(self):
tf.reset_default_graph()
# Verifies that depth_multiplier arg scope actually works
# if no default min_depth is provided.
with slim.arg_scope((mobilenet.depth_multiplier,), min_depth=32):
mobilenet_v2.mobilenet(
tf.placeholder(tf.float32, (10, 224, 224, 2)),
conv_defs=mobilenet_v2.V2_DEF,
depth_multiplier=0.1)
s = [op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')]
s = set(s)
self.assertSameElements(s, [32, 192, 128, 1001])
def testFineGrained(self):
tf.reset_default_graph()
    # Verifies that finegrain_classification_mode keeps the last conv layer
    # at full width even with a very small depth_multiplier.
mobilenet_v2.mobilenet(
tf.placeholder(tf.float32, (10, 224, 224, 2)),
conv_defs=mobilenet_v2.V2_DEF,
depth_multiplier=0.01,
finegrain_classification_mode=True)
s = [op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')]
s = set(s)
# All convolutions will be 8->48, except for the last one.
self.assertSameElements(s, [8, 48, 1001, 1280])
def testMobilenetBase(self):
tf.reset_default_graph()
# Verifies that mobilenet_base returns pre-pooling layer.
with slim.arg_scope((mobilenet.depth_multiplier,), min_depth=32):
net, _ = mobilenet_v2.mobilenet_base(
tf.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=mobilenet_v2.V2_DEF,
depth_multiplier=0.1)
self.assertEqual(net.get_shape().as_list(), [10, 7, 7, 128])
def testWithOutputStride16(self):
tf.reset_default_graph()
out, _ = mobilenet.mobilenet_base(
tf.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=mobilenet_v2.V2_DEF,
output_stride=16)
self.assertEqual(out.get_shape().as_list()[1:3], [14, 14])
def testMultiplier(self):
op = mobilenet.op
new_def = copy.deepcopy(mobilenet_v2.V2_DEF)
def inverse_multiplier(output_params, multiplier):
output_params['num_outputs'] = int(
output_params['num_outputs'] / multiplier)
new_def['spec'][0] = op(
slim.conv2d,
kernel_size=(3, 3),
multiplier_func=inverse_multiplier,
num_outputs=16)
_ = mobilenet_v2.mobilenet_base(
tf.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=new_def,
depth_multiplier=0.1)
s = [op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')]
# Expect first layer to be 160 (16 / 0.1), and other layers
# their max(original size * 0.1, 8)
self.assertEqual([160, 8, 48, 8, 48], s[:5])
def testWithOutputStride8AndExplicitPadding(self):
tf.reset_default_graph()
out, _ = mobilenet.mobilenet_base(
tf.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=mobilenet_v2.V2_DEF,
output_stride=8,
use_explicit_padding=True,
scope='MobilenetV2')
self.assertEqual(out.get_shape().as_list()[1:3], [28, 28])
def testWithOutputStride16AndExplicitPadding(self):
tf.reset_default_graph()
out, _ = mobilenet.mobilenet_base(
tf.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=mobilenet_v2.V2_DEF,
output_stride=16,
use_explicit_padding=True)
self.assertEqual(out.get_shape().as_list()[1:3], [14, 14])
def testBatchNormScopeDoesNotHaveIsTrainingWhenItsSetToNone(self):
sc = mobilenet.training_scope(is_training=None)
self.assertNotIn('is_training', sc[slim.arg_scope_func_key(
slim.batch_norm)])
def testBatchNormScopeDoesHasIsTrainingWhenItsNotNone(self):
sc = mobilenet.training_scope(is_training=False)
self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)])
sc = mobilenet.training_scope(is_training=True)
self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)])
sc = mobilenet.training_scope()
self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)])
if __name__ == '__main__':
tf.test.main()
# ----- end of slim/nets/mobilenet/mobilenet_v2_test.py (package: 123-object-detection) -----
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Convolution blocks for mobilenet."""
import contextlib
import functools
import tensorflow.compat.v1 as tf
import tf_slim as slim
def _fixed_padding(inputs, kernel_size, rate=1):
"""Pads the input along the spatial dimensions independently of input size.
Pads the input such that if it was used in a convolution with 'VALID' padding,
the output would have the same dimensions as if the unpadded input was used
in a convolution with 'SAME' padding.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
rate: An integer, rate for atrous convolution.
Returns:
output: A tensor of size [batch, height_out, width_out, channels] with the
input, either intact (if kernel_size == 1) or padded (if kernel_size > 1).
"""
kernel_size_effective = [kernel_size[0] + (kernel_size[0] - 1) * (rate - 1),
kernel_size[0] + (kernel_size[0] - 1) * (rate - 1)]
pad_total = [kernel_size_effective[0] - 1, kernel_size_effective[1] - 1]
pad_beg = [pad_total[0] // 2, pad_total[1] // 2]
pad_end = [pad_total[0] - pad_beg[0], pad_total[1] - pad_beg[1]]
padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg[0], pad_end[0]],
[pad_beg[1], pad_end[1]], [0, 0]])
return padded_inputs
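# A minimal usage sketch of _fixed_padding (illustrative; assumes TF1 graph
# mode with eager execution disabled): a 3x3 kernel at rate=1 needs one pixel
# of padding on each spatial side, so a 224x224 input becomes 226x226 and a
# later 'VALID' convolution matches the 'SAME' output size.
def _fixed_padding_example():
  images = tf.placeholder(tf.float32, (1, 224, 224, 3))
  padded = _fixed_padding(images, kernel_size=[3, 3])
  # padded.get_shape().as_list() == [1, 226, 226, 3]
  return padded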
def _make_divisible(v, divisor, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
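# Illustrative sanity checks for _make_divisible (a minimal sketch of the
# rounding behavior used throughout the conv defs).
assert _make_divisible(30, 8) == 32   # rounds to the nearest multiple of 8
assert _make_divisible(16, 8) == 16   # already a multiple of 8
assert _make_divisible(20, 16) == 32  # 16 would be < 0.9 * 20, so bump by 16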
def _split_divisible(num, num_ways, divisible_by=8):
"""Evenly splits num, num_ways so each piece is a multiple of divisible_by."""
assert num % divisible_by == 0
assert num / num_ways >= divisible_by
# Note: want to round down, we adjust each split to match the total.
base = num // num_ways // divisible_by * divisible_by
result = []
accumulated = 0
for i in range(num_ways):
r = base
while accumulated + r < num * (i + 1) / num_ways:
r += divisible_by
result.append(r)
accumulated += r
assert accumulated == num
return result
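# Illustrative sanity checks for _split_divisible (a minimal sketch): every
# chunk is a multiple of divisible_by and the chunks sum to num.
assert _split_divisible(48, 3) == [16, 16, 16]
assert _split_divisible(40, 3) == [16, 16, 8]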
@contextlib.contextmanager
def _v1_compatible_scope_naming(scope):
"""v1 compatible scope naming."""
  if scope is None:  # Create uniquely named separable blocks.
with tf.variable_scope(None, default_name='separable') as s, \
tf.name_scope(s.original_name_scope):
yield ''
else:
    # We use scope_depthwise, scope_pointwise for compatibility with V1 ckpts,
    # which provide numbered scopes.
scope += '_'
yield scope
@slim.add_arg_scope
def split_separable_conv2d(input_tensor,
num_outputs,
scope=None,
normalizer_fn=None,
stride=1,
rate=1,
endpoints=None,
use_explicit_padding=False):
"""Separable mobilenet V1 style convolution.
  Depthwise convolution, with default non-linearity, followed by a 1x1
  pointwise convolution. This is similar to slim.separable_conv2d, but differs
  in that it applies batch normalization and non-linearity to the depthwise
  convolution. This matches the basic building block of the MobileNet paper
  (https://arxiv.org/abs/1704.04861).
Args:
input_tensor: input
num_outputs: number of outputs
scope: optional name of the scope. Note if provided it will use
      scope_depthwise for depthwise, and scope_pointwise for pointwise.
normalizer_fn: which normalizer function to use for depthwise/pointwise
stride: stride
rate: output rate (also known as dilation rate)
endpoints: optional, if provided, will export additional tensors to it.
use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
Returns:
    output tensor.
"""
with _v1_compatible_scope_naming(scope) as scope:
dw_scope = scope + 'depthwise'
endpoints = endpoints if endpoints is not None else {}
kernel_size = [3, 3]
padding = 'SAME'
if use_explicit_padding:
padding = 'VALID'
input_tensor = _fixed_padding(input_tensor, kernel_size, rate)
net = slim.separable_conv2d(
input_tensor,
None,
kernel_size,
depth_multiplier=1,
stride=stride,
rate=rate,
normalizer_fn=normalizer_fn,
padding=padding,
scope=dw_scope)
endpoints[dw_scope] = net
pw_scope = scope + 'pointwise'
net = slim.conv2d(
net,
num_outputs, [1, 1],
stride=1,
normalizer_fn=normalizer_fn,
scope=pw_scope)
endpoints[pw_scope] = net
return net
def expand_input_by_factor(n, divisible_by=8):
return lambda num_inputs, **_: _make_divisible(num_inputs * n, divisible_by)
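# A minimal sketch of expand_input_by_factor: it returns a callable that
# expanded_conv invokes with the incoming depth (as `num_inputs`) to compute
# the expansion width, rounded via _make_divisible.
assert expand_input_by_factor(6)(num_inputs=16) == 96
assert expand_input_by_factor(6)(num_inputs=24) == 144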
def split_conv(input_tensor,
num_outputs,
num_ways,
scope,
divisible_by=8,
**kwargs):
"""Creates a split convolution.
  Split convolution splits the input and output into 'num_ways' blocks of
  approximately the same size each, and only connects the i-th input block to
  the i-th output block.
Args:
input_tensor: input tensor
num_outputs: number of output filters
num_ways: num blocks to split by.
scope: scope for all the operators.
    divisible_by: make sure that every part is divisible by this.
**kwargs: will be passed directly into conv2d operator
Returns:
tensor
"""
b = input_tensor.get_shape().as_list()[3]
if num_ways == 1 or min(b // num_ways,
num_outputs // num_ways) < divisible_by:
# Don't do any splitting if we end up with less than 8 filters
# on either side.
return slim.conv2d(input_tensor, num_outputs, [1, 1], scope=scope, **kwargs)
outs = []
input_splits = _split_divisible(b, num_ways, divisible_by=divisible_by)
output_splits = _split_divisible(
num_outputs, num_ways, divisible_by=divisible_by)
inputs = tf.split(input_tensor, input_splits, axis=3, name='split_' + scope)
base = scope
for i, (input_tensor, out_size) in enumerate(zip(inputs, output_splits)):
scope = base + '_part_%d' % (i,)
n = slim.conv2d(input_tensor, out_size, [1, 1], scope=scope, **kwargs)
n = tf.identity(n, scope + '_output')
outs.append(n)
return tf.concat(outs, 3, name=scope + '_concat')
@slim.add_arg_scope
def expanded_conv(input_tensor,
num_outputs,
expansion_size=expand_input_by_factor(6),
stride=1,
rate=1,
kernel_size=(3, 3),
residual=True,
normalizer_fn=None,
split_projection=1,
split_expansion=1,
split_divisible_by=8,
expansion_transform=None,
depthwise_location='expansion',
depthwise_channel_multiplier=1,
endpoints=None,
use_explicit_padding=False,
padding='SAME',
inner_activation_fn=None,
depthwise_activation_fn=None,
project_activation_fn=tf.identity,
depthwise_fn=slim.separable_conv2d,
expansion_fn=split_conv,
projection_fn=split_conv,
scope=None):
"""Depthwise Convolution Block with expansion.
Builds a composite convolution that has the following structure
expansion (1x1) -> depthwise (kernel_size) -> projection (1x1)
Args:
input_tensor: input
num_outputs: number of outputs in the final layer.
expansion_size: the size of expansion, could be a constant or a callable.
      If the latter, it will be provided 'num_inputs' as an input. For forward
      compatibility it should accept arbitrary keyword arguments.
      The default expands the input by a factor of 6.
stride: depthwise stride
rate: depthwise rate
kernel_size: depthwise kernel
residual: whether to include residual connection between input
and output.
normalizer_fn: batchnorm or otherwise
split_projection: how many ways to split projection operator
(that is conv expansion->bottleneck)
    split_expansion: how many ways to split the expansion op
      (that is, conv bottleneck->expansion); ops will keep depth divisible
      by this value.
split_divisible_by: make sure every split group is divisible by this number.
expansion_transform: Optional function that takes expansion
as a single input and returns output.
    depthwise_location: where to put the depthwise convolution. Supported
      values: None, 'input', 'output', 'expansion'.
    depthwise_channel_multiplier: depthwise channel multiplier:
      each input channel will be replicated (with different filters)
      that many times. So if the input had c channels,
      the output will have c x depthwise_channel_multiplier channels.
endpoints: An optional dictionary into which intermediate endpoints are
placed. The keys "expansion_output", "depthwise_output",
"projection_output" and "expansion_transform" are always populated, even
if the corresponding functions are not invoked.
use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
padding: Padding type to use if `use_explicit_padding` is not set.
inner_activation_fn: activation function to use in all inner convolutions.
If none, will rely on slim default scopes.
    depthwise_activation_fn: activation function to use for depthwise only.
If not provided will rely on slim default scopes. If both
inner_activation_fn and depthwise_activation_fn are provided,
depthwise_activation_fn takes precedence over inner_activation_fn.
project_activation_fn: activation function for the project layer.
(note this layer is not affected by inner_activation_fn)
depthwise_fn: Depthwise convolution function.
    expansion_fn: Expansion convolution function. If a custom function is used,
      then "split_expansion" and "split_divisible_by" will be ignored.
    projection_fn: Projection convolution function. If a custom function is
      used, then "split_projection" and "split_divisible_by" will be ignored.
scope: optional scope.
Returns:
Tensor of depth num_outputs
Raises:
    TypeError: on an invalid `depthwise_location`, or when
      `use_explicit_padding` is combined with non-'SAME' padding.
"""
conv_defaults = {}
dw_defaults = {}
if inner_activation_fn is not None:
conv_defaults['activation_fn'] = inner_activation_fn
dw_defaults['activation_fn'] = inner_activation_fn
if depthwise_activation_fn is not None:
dw_defaults['activation_fn'] = depthwise_activation_fn
# pylint: disable=g-backslash-continuation
with tf.variable_scope(scope, default_name='expanded_conv') as s, \
tf.name_scope(s.original_name_scope), \
slim.arg_scope((slim.conv2d,), **conv_defaults), \
slim.arg_scope((slim.separable_conv2d,), **dw_defaults):
prev_depth = input_tensor.get_shape().as_list()[3]
if depthwise_location not in [None, 'input', 'output', 'expansion']:
raise TypeError('%r is unknown value for depthwise_location' %
depthwise_location)
if use_explicit_padding:
if padding != 'SAME':
raise TypeError('`use_explicit_padding` should only be used with '
'"SAME" padding.')
padding = 'VALID'
depthwise_func = functools.partial(
depthwise_fn,
num_outputs=None,
kernel_size=kernel_size,
depth_multiplier=depthwise_channel_multiplier,
stride=stride,
rate=rate,
normalizer_fn=normalizer_fn,
padding=padding,
scope='depthwise')
# b1 -> b2 * r -> b2
# i -> (o * r) (bottleneck) -> o
input_tensor = tf.identity(input_tensor, 'input')
net = input_tensor
if depthwise_location == 'input':
if use_explicit_padding:
net = _fixed_padding(net, kernel_size, rate)
net = depthwise_func(net, activation_fn=None)
net = tf.identity(net, name='depthwise_output')
if endpoints is not None:
endpoints['depthwise_output'] = net
if callable(expansion_size):
inner_size = expansion_size(num_inputs=prev_depth)
else:
inner_size = expansion_size
if inner_size > net.shape[3]:
if expansion_fn == split_conv:
expansion_fn = functools.partial(
expansion_fn,
num_ways=split_expansion,
divisible_by=split_divisible_by,
stride=1)
net = expansion_fn(
net,
inner_size,
scope='expand',
normalizer_fn=normalizer_fn)
net = tf.identity(net, 'expansion_output')
if endpoints is not None:
endpoints['expansion_output'] = net
if depthwise_location == 'expansion':
if use_explicit_padding:
net = _fixed_padding(net, kernel_size, rate)
net = depthwise_func(net)
net = tf.identity(net, name='depthwise_output')
if endpoints is not None:
endpoints['depthwise_output'] = net
if expansion_transform:
net = expansion_transform(expansion_tensor=net, input_tensor=input_tensor)
# Note in contrast with expansion, we always have
# projection to produce the desired output size.
if projection_fn == split_conv:
projection_fn = functools.partial(
projection_fn,
num_ways=split_projection,
divisible_by=split_divisible_by,
stride=1)
net = projection_fn(
net,
num_outputs,
scope='project',
normalizer_fn=normalizer_fn,
activation_fn=project_activation_fn)
if endpoints is not None:
endpoints['projection_output'] = net
if depthwise_location == 'output':
if use_explicit_padding:
net = _fixed_padding(net, kernel_size, rate)
net = depthwise_func(net, activation_fn=None)
net = tf.identity(net, name='depthwise_output')
if endpoints is not None:
endpoints['depthwise_output'] = net
if callable(residual): # custom residual
net = residual(input_tensor=input_tensor, output_tensor=net)
elif (residual and
# stride check enforces that we don't add residuals when spatial
# dimensions are None
stride == 1 and
# Depth matches
net.get_shape().as_list()[3] ==
input_tensor.get_shape().as_list()[3]):
net += input_tensor
return tf.identity(net, name='output')
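# A minimal usage sketch of expanded_conv (illustrative; assumes TF1 graph
# mode with eager execution disabled): one inverted-bottleneck block whose
# input and output depths match at stride 1, so the residual path is taken.
def _expanded_conv_example():
  features = tf.placeholder(tf.float32, (1, 56, 56, 24))
  block = expanded_conv(
      features,
      num_outputs=24,
      expansion_size=expand_input_by_factor(6))  # expand 24 -> 144 channels
  # block.get_shape().as_list() == [1, 56, 56, 24]
  return block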
@slim.add_arg_scope
def squeeze_excite(input_tensor,
divisible_by=8,
squeeze_factor=3,
inner_activation_fn=tf.nn.relu,
gating_fn=tf.sigmoid,
squeeze_input_tensor=None,
pool=None):
"""Squeeze excite block for Mobilenet V3.
If the squeeze_input_tensor - or the input_tensor if squeeze_input_tensor is
None - contains variable dimensions (Nonetype in tensor shape), perform
average pooling (as the first step in the squeeze operation) by calling
reduce_mean across the H/W of the input tensor.
Args:
input_tensor: input tensor to apply SE block to.
divisible_by: ensures all inner dimensions are divisible by this number.
squeeze_factor: the factor of squeezing in the inner fully connected layer
inner_activation_fn: non-linearity to be used in inner layer.
gating_fn: non-linearity to be used for final gating function
squeeze_input_tensor: custom tensor to use for computing gating activation.
If provided the result will be input_tensor * SE(squeeze_input_tensor)
instead of input_tensor * SE(input_tensor).
pool: if number is provided will average pool with that kernel size
to compute inner tensor, followed by bilinear upsampling.
Returns:
Gated input_tensor. (e.g. X * SE(X))
"""
with tf.variable_scope('squeeze_excite'):
if squeeze_input_tensor is None:
squeeze_input_tensor = input_tensor
input_size = input_tensor.shape.as_list()[1:3]
pool_height, pool_width = squeeze_input_tensor.shape.as_list()[1:3]
stride = 1
if pool is not None and pool_height >= pool:
pool_height, pool_width, stride = pool, pool, pool
input_channels = squeeze_input_tensor.shape.as_list()[3]
output_channels = input_tensor.shape.as_list()[3]
squeeze_channels = _make_divisible(
input_channels / squeeze_factor, divisor=divisible_by)
if pool is None:
pooled = tf.reduce_mean(squeeze_input_tensor, axis=[1, 2], keepdims=True)
else:
pooled = tf.nn.avg_pool(
squeeze_input_tensor, (1, pool_height, pool_width, 1),
strides=(1, stride, stride, 1),
padding='VALID')
squeeze = slim.conv2d(
pooled,
kernel_size=(1, 1),
num_outputs=squeeze_channels,
normalizer_fn=None,
activation_fn=inner_activation_fn)
excite_outputs = output_channels
excite = slim.conv2d(squeeze, num_outputs=excite_outputs,
kernel_size=[1, 1],
normalizer_fn=None,
activation_fn=gating_fn)
if pool is not None:
# Note: As of 03/20/2019 only BILINEAR (the default) with
# align_corners=True has gradients implemented in TPU.
excite = tf.image.resize_images(
excite, input_size,
align_corners=True)
result = input_tensor * excite
return result
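# A minimal usage sketch of squeeze_excite (illustrative; assumes TF1 graph
# mode): global-average-pool the features, squeeze 96 -> 32 channels, excite
# back to 96 through a sigmoid gate, and rescale the input feature map.
def _squeeze_excite_example():
  features = tf.placeholder(tf.float32, (1, 14, 14, 96))
  gated = squeeze_excite(features)  # squeeze_factor=3 -> 32 inner channels
  # gated.get_shape().as_list() == [1, 14, 14, 96]
  return gated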
# ----- end of slim/nets/mobilenet/conv_blocks.py (package: 123-object-detection) -----
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Mobilenet Base Class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import copy
import os
import tensorflow.compat.v1 as tf
import tf_slim as slim
@slim.add_arg_scope
def apply_activation(x, name=None, activation_fn=None):
return activation_fn(x, name=name) if activation_fn else x
def _fixed_padding(inputs, kernel_size, rate=1):
"""Pads the input along the spatial dimensions independently of input size.
Pads the input such that if it was used in a convolution with 'VALID' padding,
the output would have the same dimensions as if the unpadded input was used
in a convolution with 'SAME' padding.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
rate: An integer, rate for atrous convolution.
Returns:
output: A tensor of size [batch, height_out, width_out, channels] with the
input, either intact (if kernel_size == 1) or padded (if kernel_size > 1).
"""
kernel_size_effective = [kernel_size[0] + (kernel_size[0] - 1) * (rate - 1),
kernel_size[0] + (kernel_size[0] - 1) * (rate - 1)]
pad_total = [kernel_size_effective[0] - 1, kernel_size_effective[1] - 1]
pad_beg = [pad_total[0] // 2, pad_total[1] // 2]
pad_end = [pad_total[0] - pad_beg[0], pad_total[1] - pad_beg[1]]
padded_inputs = tf.pad(
tensor=inputs,
paddings=[[0, 0], [pad_beg[0], pad_end[0]], [pad_beg[1], pad_end[1]],
[0, 0]])
return padded_inputs
def _make_divisible(v, divisor, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return int(new_v)
@contextlib.contextmanager
def _set_arg_scope_defaults(defaults):
"""Sets arg scope defaults for all items present in defaults.
Args:
defaults: dictionary/list of pairs, containing a mapping from
function to a dictionary of default args.
Yields:
context manager where all defaults are set.
"""
if hasattr(defaults, 'items'):
items = list(defaults.items())
else:
items = defaults
if not items:
yield
else:
func, default_arg = items[0]
with slim.arg_scope(func, **default_arg):
with _set_arg_scope_defaults(items[1:]):
yield
@slim.add_arg_scope
def depth_multiplier(output_params,
multiplier,
divisible_by=8,
min_depth=8,
**unused_kwargs):
if 'num_outputs' not in output_params:
return
d = output_params['num_outputs']
output_params['num_outputs'] = _make_divisible(d * multiplier, divisible_by,
min_depth)
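# A minimal sketch of depth_multiplier: it rewrites 'num_outputs' in the params
# dict in place, scaling by `multiplier` and rounding via _make_divisible.
def _depth_multiplier_example():
  params = {'num_outputs': 32}
  depth_multiplier(params, multiplier=0.5)
  assert params['num_outputs'] == 16  # 32 * 0.5 is already a multiple of 8
  return params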
_Op = collections.namedtuple('Op', ['op', 'params', 'multiplier_func'])
def op(opfunc, multiplier_func=depth_multiplier, **params):
multiplier = params.pop('multiplier_transform', multiplier_func)
return _Op(opfunc, params=params, multiplier_func=multiplier)
class NoOpScope(object):
"""No-op context manager."""
def __enter__(self):
return None
def __exit__(self, exc_type, exc_value, traceback):
return False
def safe_arg_scope(funcs, **kwargs):
"""Returns `slim.arg_scope` with all None arguments removed.
Args:
funcs: Functions to pass to `arg_scope`.
**kwargs: Arguments to pass to `arg_scope`.
Returns:
arg_scope or No-op context manager.
Note: can be useful if None value should be interpreted as "do not overwrite
this parameter value".
"""
filtered_args = {name: value for name, value in kwargs.items()
if value is not None}
if filtered_args:
return slim.arg_scope(funcs, **filtered_args)
else:
return NoOpScope()
@slim.add_arg_scope
def mobilenet_base( # pylint: disable=invalid-name
inputs,
conv_defs,
multiplier=1.0,
final_endpoint=None,
output_stride=None,
use_explicit_padding=False,
scope=None,
is_training=False):
"""Mobilenet base network.
Constructs a network from inputs to the given final endpoint. By default
the network is constructed in inference mode. To create network
in training mode use:
with slim.arg_scope(mobilenet.training_scope()):
logits, endpoints = mobilenet_base(...)
Args:
inputs: a tensor of shape [batch_size, height, width, channels].
conv_defs: A list of op(...) layers specifying the net architecture.
multiplier: Float multiplier for the depth (number of channels)
for all convolution ops. The value must be greater than zero. Typical
usage will be to set this value in (0, 1) to reduce the number of
parameters or computation cost of the model.
    final_endpoint: The name of the last layer, for early termination. For
      V1-based networks the last layer is "layer_14"; for V2 it is "layer_20".
output_stride: An integer that specifies the requested ratio of input to
output spatial resolution. If not None, then we invoke atrous convolution
if necessary to prevent the network from reducing the spatial resolution
of the activation maps. Allowed values are 1 or any even number, excluding
zero. Typical values are 8 (accurate fully convolutional mode), 16
(fast fully convolutional mode), and 32 (classification mode).
      NOTE: output_stride relies on all subsequent operators to support dilated
      operation via the "rate" parameter. This might require wrapping non-conv
      operators to operate properly.
use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
scope: optional variable scope.
is_training: How to setup batch_norm and other ops. Note: most of the time
      this does not need to be set directly. Use mobilenet.training_scope() to set
up training instead. This parameter is here for backward compatibility
only. It is safe to set it to the value matching
training_scope(is_training=...). It is also safe to explicitly set
      it to False, even if there is an outer training_scope set to training.
(The network will be built in inference mode). If this is set to None,
no arg_scope is added for slim.batch_norm's is_training parameter.
Returns:
tensor_out: output tensor.
end_points: a set of activations for external use, for example summaries or
losses.
Raises:
ValueError: depth_multiplier <= 0, or the target output_stride is not
allowed.
"""
if multiplier <= 0:
raise ValueError('multiplier is not greater than zero.')
# Set conv defs defaults and overrides.
conv_defs_defaults = conv_defs.get('defaults', {})
conv_defs_overrides = conv_defs.get('overrides', {})
if use_explicit_padding:
conv_defs_overrides = copy.deepcopy(conv_defs_overrides)
conv_defs_overrides[
(slim.conv2d, slim.separable_conv2d)] = {'padding': 'VALID'}
if output_stride is not None:
if output_stride == 0 or (output_stride > 1 and output_stride % 2):
raise ValueError('Output stride must be None, 1 or a multiple of 2.')
# a) Set the tensorflow scope
# b) set padding to default: note we might consider removing this
# since it is also set by mobilenet_scope
# c) set all defaults
# d) set all extra overrides.
# pylint: disable=g-backslash-continuation
with _scope_all(scope, default_scope='Mobilenet'), \
safe_arg_scope([slim.batch_norm], is_training=is_training), \
_set_arg_scope_defaults(conv_defs_defaults), \
_set_arg_scope_defaults(conv_defs_overrides):
# The current_stride variable keeps track of the output stride of the
# activations, i.e., the running product of convolution strides up to the
# current network layer. This allows us to invoke atrous convolution
# whenever applying the next convolution would result in the activations
# having output stride larger than the target output_stride.
current_stride = 1
# The atrous convolution rate parameter.
rate = 1
net = inputs
# Insert default parameters before the base scope which includes
# any custom overrides set in mobilenet.
end_points = {}
scopes = {}
for i, opdef in enumerate(conv_defs['spec']):
params = dict(opdef.params)
opdef.multiplier_func(params, multiplier)
stride = params.get('stride', 1)
if output_stride is not None and current_stride == output_stride:
# If we have reached the target output_stride, then we need to employ
# atrous convolution with stride=1 and multiply the atrous rate by the
# current unit's stride for use in subsequent layers.
layer_stride = 1
layer_rate = rate
rate *= stride
else:
layer_stride = stride
layer_rate = 1
current_stride *= stride
# Update params.
params['stride'] = layer_stride
# Only insert rate to params if rate > 1 and kernel size is not [1, 1].
if layer_rate > 1:
if tuple(params.get('kernel_size', [])) != (1, 1):
# We will apply atrous rate in the following cases:
# 1) When kernel_size is not in params, the operation then uses
# default kernel size 3x3.
# 2) When kernel_size is in params, and if the kernel_size is not
# equal to (1, 1) (there is no need to apply atrous convolution to
# any 1x1 convolution).
params['rate'] = layer_rate
# Set padding
if use_explicit_padding:
if 'kernel_size' in params:
net = _fixed_padding(net, params['kernel_size'], layer_rate)
else:
params['use_explicit_padding'] = True
end_point = 'layer_%d' % (i + 1)
try:
net = opdef.op(net, **params)
except Exception:
print('Failed to create op %i: %r params: %r' % (i, opdef, params))
raise
end_points[end_point] = net
scope = os.path.dirname(net.name)
scopes[scope] = end_point
if final_endpoint is not None and end_point == final_endpoint:
break
# Add all tensors that end with 'output' to
# endpoints
for t in net.graph.get_operations():
scope = os.path.dirname(t.name)
bn = os.path.basename(t.name)
if scope in scopes and t.name.endswith('output'):
end_points[scopes[scope] + '/' + bn] = t.outputs[0]
return net, end_points
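# A minimal usage sketch of mobilenet_base (illustrative; assumes TF1 graph
# mode and conv defs such as nets.mobilenet.mobilenet_v2.V2_DEF): build the
# feature extractor without the classification head.
def _mobilenet_base_example():
  from nets.mobilenet import mobilenet_v2  # local import, illustrative only
  images = tf.placeholder(tf.float32, (1, 224, 224, 3))
  net, end_points = mobilenet_base(images, conv_defs=mobilenet_v2.V2_DEF)
  # net is the final 7x7x1280 feature map; end_points maps 'layer_*' names
  # to intermediate tensors.
  return net, end_points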
@contextlib.contextmanager
def _scope_all(scope, default_scope=None):
with tf.variable_scope(scope, default_name=default_scope) as s,\
tf.name_scope(s.original_name_scope):
yield s
@slim.add_arg_scope
def mobilenet(inputs,
num_classes=1001,
prediction_fn=slim.softmax,
reuse=None,
scope='Mobilenet',
base_only=False,
use_reduce_mean_for_pooling=False,
**mobilenet_args):
"""Mobilenet model for classification, supports both V1 and V2.
Note: default mode is inference, use mobilenet.training_scope to create
training network.
Args:
inputs: a tensor of shape [batch_size, height, width, channels].
num_classes: number of predicted classes. If 0 or None, the logits layer
is omitted and the input features to the logits layer (before dropout)
are returned instead.
prediction_fn: a function to get predictions out of logits
(default softmax).
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
base_only: if True will only create the base of the network (no pooling
and no logits).
    use_reduce_mean_for_pooling: if True use reduce_mean for pooling. If
      False use the global_pool function that provides some optimization.
**mobilenet_args: passed to mobilenet_base verbatim.
- conv_defs: list of conv defs
- multiplier: Float multiplier for the depth (number of channels)
for all convolution ops. The value must be greater than zero. Typical
usage will be to set this value in (0, 1) to reduce the number of
parameters or computation cost of the model.
- output_stride: will ensure that the last layer has at most total stride.
If the architecture calls for more stride than that provided
(e.g. output_stride=16, but the architecture has 5 stride=2 operators),
it will replace output_stride with fractional convolutions using Atrous
Convolutions.
Returns:
logits: the pre-softmax activations, a tensor of size
[batch_size, num_classes]
end_points: a dictionary from components of the network to the corresponding
activation tensor.
Raises:
ValueError: Input rank is invalid.
"""
is_training = mobilenet_args.get('is_training', False)
input_shape = inputs.get_shape().as_list()
if len(input_shape) != 4:
raise ValueError('Expected rank 4 input, was: %d' % len(input_shape))
with tf.variable_scope(scope, 'Mobilenet', reuse=reuse) as scope:
inputs = tf.identity(inputs, 'input')
net, end_points = mobilenet_base(inputs, scope=scope, **mobilenet_args)
if base_only:
return net, end_points
net = tf.identity(net, name='embedding')
with tf.variable_scope('Logits'):
net = global_pool(net, use_reduce_mean_for_pooling)
end_points['global_pool'] = net
if not num_classes:
return net, end_points
net = slim.dropout(net, scope='Dropout', is_training=is_training)
# 1 x 1 x num_classes
# Note: legacy scope name.
logits = slim.conv2d(
net,
num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
biases_initializer=tf.zeros_initializer(),
scope='Conv2d_1c_1x1')
logits = tf.squeeze(logits, [1, 2])
logits = tf.identity(logits, name='output')
end_points['Logits'] = logits
if prediction_fn:
end_points['Predictions'] = prediction_fn(logits, 'Predictions')
return logits, end_points
def global_pool(input_tensor,
use_reduce_mean_for_pooling=False,
pool_op=tf.nn.avg_pool2d):
"""Applies avg pool to produce 1x1 output.
  NOTE: This function is functionally equivalent to reduce_mean, but it has a
  baked-in average pool, which has better support across hardware.
Args:
input_tensor: input tensor
use_reduce_mean_for_pooling: if True use reduce_mean for pooling
pool_op: pooling op (avg pool is default)
Returns:
a tensor batch_size x 1 x 1 x depth.
"""
if use_reduce_mean_for_pooling:
return tf.reduce_mean(
input_tensor, [1, 2], keepdims=True, name='ReduceMean')
else:
shape = input_tensor.get_shape().as_list()
if shape[1] is None or shape[2] is None:
kernel_size = tf.convert_to_tensor(value=[
1,
tf.shape(input=input_tensor)[1],
tf.shape(input=input_tensor)[2], 1
])
else:
kernel_size = [1, shape[1], shape[2], 1]
output = pool_op(
input_tensor, ksize=kernel_size, strides=[1, 1, 1, 1], padding='VALID')
# Recover output shape, for unknown shape.
output.set_shape([None, 1, 1, None])
return output
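# A minimal usage sketch of global_pool (illustrative; assumes TF1 graph mode):
# average-pool a 7x7 feature map down to 1x1 while keeping a rank-4 tensor.
def _global_pool_example():
  features = tf.placeholder(tf.float32, (8, 7, 7, 1280))
  pooled = global_pool(features)
  # pooled has static shape (8, 1, 1, 1280)
  return pooled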
def training_scope(is_training=True,
weight_decay=0.00004,
stddev=0.09,
dropout_keep_prob=0.8,
bn_decay=0.997):
"""Defines Mobilenet training scope.
Usage:
with slim.arg_scope(mobilenet.training_scope()):
logits, endpoints = mobilenet_v2.mobilenet(input_tensor)
     # the network created will be trainable with dropout/batch norm
     # initialized appropriately.
Args:
is_training: if set to False this will ensure that all customizations are
set to non-training mode. This might be helpful for code that is reused
across both training/evaluation, but most of the time training_scope with
      value False is not needed. If this is set to None, the parameter is not
      added to the batch_norm arg_scope.
weight_decay: The weight decay to use for regularizing the model.
stddev: Standard deviation for initialization, if negative uses xavier.
dropout_keep_prob: dropout keep probability (not set if equals to None).
bn_decay: decay for the batch norm moving averages (not set if equals to
None).
Returns:
An argument scope to use via arg_scope.
"""
# Note: do not introduce parameters that would change the inference
# model here (for example whether to use bias), modify conv_def instead.
batch_norm_params = {
'decay': bn_decay,
'is_training': is_training
}
  if stddev < 0:
    weight_initializer = slim.initializers.xavier_initializer()
  else:
    weight_initializer = tf.truncated_normal_initializer(
        stddev=stddev)
  # Set weight_decay for weights in Conv and FC layers.
  with slim.arg_scope(
      [slim.conv2d, slim.fully_connected, slim.separable_conv2d],
      weights_initializer=weight_initializer,
normalizer_fn=slim.batch_norm), \
slim.arg_scope([mobilenet_base, mobilenet], is_training=is_training),\
safe_arg_scope([slim.batch_norm], **batch_norm_params), \
safe_arg_scope([slim.dropout], is_training=is_training,
keep_prob=dropout_keep_prob), \
slim.arg_scope([slim.conv2d], \
weights_regularizer=slim.l2_regularizer(weight_decay)), \
slim.arg_scope([slim.separable_conv2d], weights_regularizer=None) as s:
return s
# ----- end of slim/nets/mobilenet/mobilenet.py (package: 123-object-detection) -----
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Mobilenet V3 conv defs and helper functions.
# pylint: disable=line-too-long
Model definitions and layer breakdowns:
==================
==== V3 LARGE ====
==================
Conv2D MobilenetV3/Conv/Conv2D 351.2 k 1x224x224x3 432.0 5.42 M 1x112x112x16
Relu6 MobilenetV3/Conv/hard_swish/Relu6 ? - ? ? 1x112x112x16
DepthConv MobilenetV3/expanded_conv/depthwise/depthwise 401.4 k - 144.0 1.81 M 1x112x112x16
Relu MobilenetV3/expanded_conv/depthwise/Relu ? - ? ? 1x112x112x16
Conv2D MobilenetV3/expanded_conv/project/Conv2D 401.4 k 1x112x112x16 256.0 3.21 M 1x112x112x16
Conv2D MobilenetV3/expanded_conv_1/expand/Conv2D 1.00 M 1x112x112x16 1.02 k 12.8 M 1x112x112x64
Relu MobilenetV3/expanded_conv_1/expand/Relu ? - ? ? 1x112x112x64
DepthConv MobilenetV3/expanded_conv_1/depthwise/depthwise 1.00 M - 576.0 1.81 M 1x56x56x64
Relu MobilenetV3/expanded_conv_1/depthwise/Relu ? - ? ? 1x56x56x64
Conv2D MobilenetV3/expanded_conv_1/project/Conv2D 276.0 k 1x56x56x64 1.54 k 4.82 M 1x56x56x24
Conv2D MobilenetV3/expanded_conv_2/expand/Conv2D 301.1 k 1x56x56x24 1.73 k 5.42 M 1x56x56x72
Relu MobilenetV3/expanded_conv_2/expand/Relu ? - ? ? 1x56x56x72
DepthConv MobilenetV3/expanded_conv_2/depthwise/depthwise 451.6 k - 648.0 2.03 M 1x56x56x72
Relu MobilenetV3/expanded_conv_2/depthwise/Relu ? - ? ? 1x56x56x72
Conv2D MobilenetV3/expanded_conv_2/project/Conv2D 301.1 k 1x56x56x72 1.73 k 5.42 M 1x56x56x24
Conv2D MobilenetV3/expanded_conv_3/expand/Conv2D 301.1 k 1x56x56x24 1.73 k 5.42 M 1x56x56x72
Relu MobilenetV3/expanded_conv_3/expand/Relu ? - ? ? 1x56x56x72
DepthConv MobilenetV3/expanded_conv_3/depthwise/depthwise 282.2 k - 1.80 k 1.41 M 1x28x28x72
Relu MobilenetV3/expanded_conv_3/depthwise/Relu ? - ? ? 1x28x28x72
Conv2D MobilenetV3/expanded_conv_3/squeeze_excite/Conv/Conv2D 96.0 1x1x1x72 1.73 k 1.73 k 1x1x1x24
Relu MobilenetV3/expanded_conv_3/squeeze_excite/Conv/Relu ? - ? ? 1x1x1x24
Conv2D MobilenetV3/expanded_conv_3/squeeze_excite/Conv_1/Conv2D 96.0 1x1x1x24 1.73 k 1.73 k 1x1x1x72
Relu6 MobilenetV3/expanded_conv_3/squeeze_excite/Conv_1/Relu6 ? - ? ? 1x1x1x72
Conv2D MobilenetV3/expanded_conv_3/project/Conv2D 87.8 k 1x28x28x72 2.88 k 2.26 M 1x28x28x40
Conv2D MobilenetV3/expanded_conv_4/expand/Conv2D 125.4 k 1x28x28x40 4.80 k 3.76 M 1x28x28x120
Relu MobilenetV3/expanded_conv_4/expand/Relu ? - ? ? 1x28x28x120
DepthConv MobilenetV3/expanded_conv_4/depthwise/depthwise 188.2 k - 3.00 k 2.35 M 1x28x28x120
Relu MobilenetV3/expanded_conv_4/depthwise/Relu ? - ? ? 1x28x28x120
Conv2D MobilenetV3/expanded_conv_4/squeeze_excite/Conv/Conv2D 152.0 1x1x1x120 3.84 k 3.84 k 1x1x1x32
Relu MobilenetV3/expanded_conv_4/squeeze_excite/Conv/Relu ? - ? ? 1x1x1x32
Conv2D MobilenetV3/expanded_conv_4/squeeze_excite/Conv_1/Conv2D 152.0 1x1x1x32 3.84 k 3.84 k 1x1x1x120
Relu6 MobilenetV3/expanded_conv_4/squeeze_excite/Conv_1/Relu6 ? - ? ? 1x1x1x120
Conv2D MobilenetV3/expanded_conv_4/project/Conv2D 125.4 k 1x28x28x120 4.80 k 3.76 M 1x28x28x40
Conv2D MobilenetV3/expanded_conv_5/expand/Conv2D 125.4 k 1x28x28x40 4.80 k 3.76 M 1x28x28x120
Relu MobilenetV3/expanded_conv_5/expand/Relu ? - ? ? 1x28x28x120
DepthConv MobilenetV3/expanded_conv_5/depthwise/depthwise 188.2 k - 3.00 k 2.35 M 1x28x28x120
Relu MobilenetV3/expanded_conv_5/depthwise/Relu ? - ? ? 1x28x28x120
Conv2D MobilenetV3/expanded_conv_5/squeeze_excite/Conv/Conv2D 152.0 1x1x1x120 3.84 k 3.84 k 1x1x1x32
Relu MobilenetV3/expanded_conv_5/squeeze_excite/Conv/Relu ? - ? ? 1x1x1x32
Conv2D MobilenetV3/expanded_conv_5/squeeze_excite/Conv_1/Conv2D 152.0 1x1x1x32 3.84 k 3.84 k 1x1x1x120
Relu6 MobilenetV3/expanded_conv_5/squeeze_excite/Conv_1/Relu6 ? - ? ? 1x1x1x120
Conv2D MobilenetV3/expanded_conv_5/project/Conv2D 125.4 k 1x28x28x120 4.80 k 3.76 M 1x28x28x40
Conv2D MobilenetV3/expanded_conv_6/expand/Conv2D 219.5 k 1x28x28x40 9.60 k 7.53 M 1x28x28x240
Relu6 MobilenetV3/expanded_conv_6/expand/hard_swish/Relu6 ? - ? ? 1x28x28x240
DepthConv MobilenetV3/expanded_conv_6/depthwise/depthwise 235.2 k - 2.16 k 423.4 k 1x14x14x240
Relu6 MobilenetV3/expanded_conv_6/depthwise/hard_swish/Relu6 ? - ? ? 1x14x14x240
Conv2D MobilenetV3/expanded_conv_6/project/Conv2D 62.7 k 1x14x14x240 19.2 k 3.76 M 1x14x14x80
Conv2D MobilenetV3/expanded_conv_7/expand/Conv2D 54.9 k 1x14x14x80 16.0 k 3.14 M 1x14x14x200
Relu6 MobilenetV3/expanded_conv_7/expand/hard_swish/Relu6 ? - ? ? 1x14x14x200
DepthConv MobilenetV3/expanded_conv_7/depthwise/depthwise 78.4 k - 1.80 k 352.8 k 1x14x14x200
Relu6 MobilenetV3/expanded_conv_7/depthwise/hard_swish/Relu6 ? - ? ? 1x14x14x200
Conv2D MobilenetV3/expanded_conv_7/project/Conv2D 54.9 k 1x14x14x200 16.0 k 3.14 M 1x14x14x80
Conv2D MobilenetV3/expanded_conv_8/expand/Conv2D 51.7 k 1x14x14x80 14.7 k 2.89 M 1x14x14x184
Relu6 MobilenetV3/expanded_conv_8/expand/hard_swish/Relu6 ? - ? ? 1x14x14x184
DepthConv MobilenetV3/expanded_conv_8/depthwise/depthwise 72.1 k - 1.66 k 324.6 k 1x14x14x184
Relu6 MobilenetV3/expanded_conv_8/depthwise/hard_swish/Relu6 ? - ? ? 1x14x14x184
Conv2D MobilenetV3/expanded_conv_8/project/Conv2D 51.7 k 1x14x14x184 14.7 k 2.89 M 1x14x14x80
Conv2D MobilenetV3/expanded_conv_9/expand/Conv2D 51.7 k 1x14x14x80 14.7 k 2.89 M 1x14x14x184
Relu6 MobilenetV3/expanded_conv_9/expand/hard_swish/Relu6 ? - ? ? 1x14x14x184
DepthConv MobilenetV3/expanded_conv_9/depthwise/depthwise 72.1 k - 1.66 k 324.6 k 1x14x14x184
Relu6 MobilenetV3/expanded_conv_9/depthwise/hard_swish/Relu6 ? - ? ? 1x14x14x184
Conv2D MobilenetV3/expanded_conv_9/project/Conv2D 51.7 k 1x14x14x184 14.7 k 2.89 M 1x14x14x80
Conv2D MobilenetV3/expanded_conv_10/expand/Conv2D 109.8 k 1x14x14x80 38.4 k 7.53 M 1x14x14x480
Relu6 MobilenetV3/expanded_conv_10/expand/hard_swish/Relu6 ? - ? ? 1x14x14x480
DepthConv MobilenetV3/expanded_conv_10/depthwise/depthwise 188.2 k - 4.32 k 846.7 k 1x14x14x480
Relu6 MobilenetV3/expanded_conv_10/depthwise/hard_swish/Relu6 ? - ? ? 1x14x14x480
Conv2D MobilenetV3/expanded_conv_10/squeeze_excite/Conv/Conv2D 600.0 1x1x1x480 57.6 k 57.6 k 1x1x1x120
Relu MobilenetV3/expanded_conv_10/squeeze_excite/Conv/Relu ? - ? ? 1x1x1x120
Conv2D MobilenetV3/expanded_conv_10/squeeze_excite/Conv_1/Conv2D 600.0 1x1x1x120 57.6 k 57.6 k 1x1x1x480
Relu6 MobilenetV3/expanded_conv_10/squeeze_excite/Conv_1/Relu6 ? - ? ? 1x1x1x480
Conv2D MobilenetV3/expanded_conv_10/project/Conv2D 116.0 k 1x14x14x480 53.8 k 10.5 M 1x14x14x112
Conv2D MobilenetV3/expanded_conv_11/expand/Conv2D 153.7 k 1x14x14x112 75.3 k 14.8 M 1x14x14x672
Relu6 MobilenetV3/expanded_conv_11/expand/hard_swish/Relu6 ? - ? ? 1x14x14x672
DepthConv MobilenetV3/expanded_conv_11/depthwise/depthwise 263.4 k - 6.05 k 1.19 M 1x14x14x672
Relu6 MobilenetV3/expanded_conv_11/depthwise/hard_swish/Relu6 ? - ? ? 1x14x14x672
Conv2D MobilenetV3/expanded_conv_11/squeeze_excite/Conv/Conv2D 840.0 1x1x1x672 112.9 k 112.9 k 1x1x1x168
Relu MobilenetV3/expanded_conv_11/squeeze_excite/Conv/Relu ? - ? ? 1x1x1x168
Conv2D MobilenetV3/expanded_conv_11/squeeze_excite/Conv_1/Conv2D 840.0 1x1x1x168 112.9 k 112.9 k 1x1x1x672
Relu6 MobilenetV3/expanded_conv_11/squeeze_excite/Conv_1/Relu6 ? - ? ? 1x1x1x672
Conv2D MobilenetV3/expanded_conv_11/project/Conv2D 153.7 k 1x14x14x672 75.3 k 14.8 M 1x14x14x112
Conv2D MobilenetV3/expanded_conv_12/expand/Conv2D 153.7 k 1x14x14x112 75.3 k 14.8 M 1x14x14x672
Relu6 MobilenetV3/expanded_conv_12/expand/hard_swish/Relu6 ? - ? ? 1x14x14x672
DepthConv MobilenetV3/expanded_conv_12/depthwise/depthwise 164.6 k - 16.8 k 823.2 k 1x7x7x672
Relu6 MobilenetV3/expanded_conv_12/depthwise/hard_swish/Relu6 ? - ? ? 1x7x7x672
Conv2D MobilenetV3/expanded_conv_12/squeeze_excite/Conv/Conv2D 840.0 1x1x1x672 112.9 k 112.9 k 1x1x1x168
Relu MobilenetV3/expanded_conv_12/squeeze_excite/Conv/Relu ? - ? ? 1x1x1x168
Conv2D MobilenetV3/expanded_conv_12/squeeze_excite/Conv_1/Conv2D 840.0 1x1x1x168 112.9 k 112.9 k 1x1x1x672
Relu6 MobilenetV3/expanded_conv_12/squeeze_excite/Conv_1/Relu6 ? - ? ? 1x1x1x672
Conv2D MobilenetV3/expanded_conv_12/project/Conv2D 40.8 k 1x7x7x672 107.5 k 5.27 M 1x7x7x160
Conv2D MobilenetV3/expanded_conv_13/expand/Conv2D 54.9 k 1x7x7x160 153.6 k 7.53 M 1x7x7x960
Relu6 MobilenetV3/expanded_conv_13/expand/hard_swish/Relu6 ? - ? ? 1x7x7x960
DepthConv MobilenetV3/expanded_conv_13/depthwise/depthwise 94.1 k - 24.0 k 1.18 M 1x7x7x960
Relu6 MobilenetV3/expanded_conv_13/depthwise/hard_swish/Relu6 ? - ? ? 1x7x7x960
Conv2D MobilenetV3/expanded_conv_13/squeeze_excite/Conv/Conv2D 1.20 k 1x1x1x960 230.4 k 230.4 k 1x1x1x240
Relu MobilenetV3/expanded_conv_13/squeeze_excite/Conv/Relu ? - ? ? 1x1x1x240
Conv2D MobilenetV3/expanded_conv_13/squeeze_excite/Conv_1/Conv2D 1.20 k 1x1x1x240 230.4 k 230.4 k 1x1x1x960
Relu6 MobilenetV3/expanded_conv_13/squeeze_excite/Conv_1/Relu6 ? - ? ? 1x1x1x960
Conv2D MobilenetV3/expanded_conv_13/project/Conv2D 54.9 k 1x7x7x960 153.6 k 7.53 M 1x7x7x160
Conv2D MobilenetV3/expanded_conv_14/expand/Conv2D 54.9 k 1x7x7x160 153.6 k 7.53 M 1x7x7x960
Relu6 MobilenetV3/expanded_conv_14/expand/hard_swish/Relu6 ? - ? ? 1x7x7x960
DepthConv MobilenetV3/expanded_conv_14/depthwise/depthwise 94.1 k - 24.0 k 1.18 M 1x7x7x960
Relu6 MobilenetV3/expanded_conv_14/depthwise/hard_swish/Relu6 ? - ? ? 1x7x7x960
Conv2D MobilenetV3/expanded_conv_14/squeeze_excite/Conv/Conv2D 1.20 k 1x1x1x960 230.4 k 230.4 k 1x1x1x240
Relu MobilenetV3/expanded_conv_14/squeeze_excite/Conv/Relu ? - ? ? 1x1x1x240
Conv2D MobilenetV3/expanded_conv_14/squeeze_excite/Conv_1/Conv2D 1.20 k 1x1x1x240 230.4 k 230.4 k 1x1x1x960
Relu6 MobilenetV3/expanded_conv_14/squeeze_excite/Conv_1/Relu6 ? - ? ? 1x1x1x960
Conv2D MobilenetV3/expanded_conv_14/project/Conv2D 54.9 k 1x7x7x960 153.6 k 7.53 M 1x7x7x160
Conv2D MobilenetV3/Conv_1/Conv2D 54.9 k 1x7x7x160 153.6 k 7.53 M 1x7x7x960
Relu6 MobilenetV3/Conv_1/hard_swish/Relu6 ? - ? ? 1x7x7x960
AvgPool MobilenetV3/AvgPool2D/AvgPool ? 1x7x7x960 ? 47.0 k 1x1x1x960
Conv2D MobilenetV3/Conv_2/Conv2D 2.24 k 1x1x1x960 1.23 M 1.23 M 1x1x1x1280
Relu6 MobilenetV3/Conv_2/hard_swish/Relu6 ? - ? ? 1x1x1x1280
Conv2D MobilenetV3/Logits/Conv2d_1c_1x1/Conv2D 2.28 k 1x1x1x1280 1.28 M 1.28 M 1x1x1x1001
-----
==================
==== V3 SMALL ====
==================
op name ActMem ConvInput ConvParameters Madds OutputTensor
Conv2D MobilenetV3/Conv/Conv2D 351.2 k 1x224x224x3 432.0 5.42 M 1x112x112x16
Relu6 MobilenetV3/Conv/hard_swish/Relu6 ? - ? ? 1x112x112x16
DepthConv MobilenetV3/expanded_conv/depthwise/depthwise 250.9 k - 144.0 451.6 k 1x56x56x16
Relu MobilenetV3/expanded_conv/depthwise/Relu ? - ? ? 1x56x56x16
Conv2D MobilenetV3/expanded_conv/squeeze_excite/Conv/Conv2D 24.0 1x1x1x16 128.0 128.0 1x1x1x8
Relu MobilenetV3/expanded_conv/squeeze_excite/Conv/Relu ? - ? ? 1x1x1x8
Conv2D MobilenetV3/expanded_conv/squeeze_excite/Conv_1/Conv2D 24.0 1x1x1x8 128.0 128.0 1x1x1x16
Relu6 MobilenetV3/expanded_conv/squeeze_excite/Conv_1/Relu6 ? - ? ? 1x1x1x16
Conv2D MobilenetV3/expanded_conv/project/Conv2D 100.4 k 1x56x56x16 256.0 802.8 k 1x56x56x16
Conv2D MobilenetV3/expanded_conv_1/expand/Conv2D 276.0 k 1x56x56x16 1.15 k 3.61 M 1x56x56x72
Relu MobilenetV3/expanded_conv_1/expand/Relu ? - ? ? 1x56x56x72
DepthConv MobilenetV3/expanded_conv_1/depthwise/depthwise 282.2 k - 648.0 508.0 k 1x28x28x72
Relu MobilenetV3/expanded_conv_1/depthwise/Relu ? - ? ? 1x28x28x72
Conv2D MobilenetV3/expanded_conv_1/project/Conv2D 75.3 k 1x28x28x72 1.73 k 1.35 M 1x28x28x24
Conv2D MobilenetV3/expanded_conv_2/expand/Conv2D 87.8 k 1x28x28x24 2.11 k 1.66 M 1x28x28x88
Relu MobilenetV3/expanded_conv_2/expand/Relu ? - ? ? 1x28x28x88
DepthConv MobilenetV3/expanded_conv_2/depthwise/depthwise 138.0 k - 792.0 620.9 k 1x28x28x88
Relu MobilenetV3/expanded_conv_2/depthwise/Relu ? - ? ? 1x28x28x88
Conv2D MobilenetV3/expanded_conv_2/project/Conv2D 87.8 k 1x28x28x88 2.11 k 1.66 M 1x28x28x24
Conv2D MobilenetV3/expanded_conv_3/expand/Conv2D 94.1 k 1x28x28x24 2.30 k 1.81 M 1x28x28x96
Relu6 MobilenetV3/expanded_conv_3/expand/hard_swish/Relu6 ? - ? ? 1x28x28x96
DepthConv MobilenetV3/expanded_conv_3/depthwise/depthwise 94.1 k - 2.40 k 470.4 k 1x14x14x96
Relu6 MobilenetV3/expanded_conv_3/depthwise/hard_swish/Relu6 ? - ? ? 1x14x14x96
Conv2D MobilenetV3/expanded_conv_3/squeeze_excite/Conv/Conv2D 120.0 1x1x1x96 2.30 k 2.30 k 1x1x1x24
Relu MobilenetV3/expanded_conv_3/squeeze_excite/Conv/Relu ? - ? ? 1x1x1x24
Conv2D MobilenetV3/expanded_conv_3/squeeze_excite/Conv_1/Conv2D 120.0 1x1x1x24 2.30 k 2.30 k 1x1x1x96
Relu6 MobilenetV3/expanded_conv_3/squeeze_excite/Conv_1/Relu6 ? - ? ? 1x1x1x96
Conv2D MobilenetV3/expanded_conv_3/project/Conv2D 26.7 k 1x14x14x96 3.84 k 752.6 k 1x14x14x40
Conv2D MobilenetV3/expanded_conv_4/expand/Conv2D 54.9 k 1x14x14x40 9.60 k 1.88 M 1x14x14x240
Relu6 MobilenetV3/expanded_conv_4/expand/hard_swish/Relu6 ? - ? ? 1x14x14x240
DepthConv MobilenetV3/expanded_conv_4/depthwise/depthwise 94.1 k - 6.00 k 1.18 M 1x14x14x240
Relu6 MobilenetV3/expanded_conv_4/depthwise/hard_swish/Relu6 ? - ? ? 1x14x14x240
Conv2D MobilenetV3/expanded_conv_4/squeeze_excite/Conv/Conv2D 304.0 1x1x1x240 15.4 k 15.4 k 1x1x1x64
Relu MobilenetV3/expanded_conv_4/squeeze_excite/Conv/Relu ? - ? ? 1x1x1x64
Conv2D MobilenetV3/expanded_conv_4/squeeze_excite/Conv_1/Conv2D 304.0 1x1x1x64 15.4 k 15.4 k 1x1x1x240
Relu6 MobilenetV3/expanded_conv_4/squeeze_excite/Conv_1/Relu6 ? - ? ? 1x1x1x240
Conv2D MobilenetV3/expanded_conv_4/project/Conv2D 54.9 k 1x14x14x240 9.60 k 1.88 M 1x14x14x40
Conv2D MobilenetV3/expanded_conv_5/expand/Conv2D 54.9 k 1x14x14x40 9.60 k 1.88 M 1x14x14x240
Relu6 MobilenetV3/expanded_conv_5/expand/hard_swish/Relu6 ? - ? ? 1x14x14x240
DepthConv MobilenetV3/expanded_conv_5/depthwise/depthwise 94.1 k - 6.00 k 1.18 M 1x14x14x240
Relu6 MobilenetV3/expanded_conv_5/depthwise/hard_swish/Relu6 ? - ? ? 1x14x14x240
Conv2D MobilenetV3/expanded_conv_5/squeeze_excite/Conv/Conv2D 304.0 1x1x1x240 15.4 k 15.4 k 1x1x1x64
Relu MobilenetV3/expanded_conv_5/squeeze_excite/Conv/Relu ? - ? ? 1x1x1x64
Conv2D MobilenetV3/expanded_conv_5/squeeze_excite/Conv_1/Conv2D 304.0 1x1x1x64 15.4 k 15.4 k 1x1x1x240
Relu6 MobilenetV3/expanded_conv_5/squeeze_excite/Conv_1/Relu6 ? - ? ? 1x1x1x240
Conv2D MobilenetV3/expanded_conv_5/project/Conv2D 54.9 k 1x14x14x240 9.60 k 1.88 M 1x14x14x40
Conv2D MobilenetV3/expanded_conv_6/expand/Conv2D 31.4 k 1x14x14x40 4.80 k 940.8 k 1x14x14x120
Relu6 MobilenetV3/expanded_conv_6/expand/hard_swish/Relu6 ? - ? ? 1x14x14x120
DepthConv MobilenetV3/expanded_conv_6/depthwise/depthwise 47.0 k - 3.00 k 588.0 k 1x14x14x120
Relu6 MobilenetV3/expanded_conv_6/depthwise/hard_swish/Relu6 ? - ? ? 1x14x14x120
Conv2D MobilenetV3/expanded_conv_6/squeeze_excite/Conv/Conv2D 152.0 1x1x1x120 3.84 k 3.84 k 1x1x1x32
Relu MobilenetV3/expanded_conv_6/squeeze_excite/Conv/Relu ? - ? ? 1x1x1x32
Conv2D MobilenetV3/expanded_conv_6/squeeze_excite/Conv_1/Conv2D 152.0 1x1x1x32 3.84 k 3.84 k 1x1x1x120
Relu6 MobilenetV3/expanded_conv_6/squeeze_excite/Conv_1/Relu6 ? - ? ? 1x1x1x120
Conv2D MobilenetV3/expanded_conv_6/project/Conv2D 32.9 k 1x14x14x120 5.76 k 1.13 M 1x14x14x48
Conv2D MobilenetV3/expanded_conv_7/expand/Conv2D 37.6 k 1x14x14x48 6.91 k 1.35 M 1x14x14x144
Relu6 MobilenetV3/expanded_conv_7/expand/hard_swish/Relu6 ? - ? ? 1x14x14x144
DepthConv MobilenetV3/expanded_conv_7/depthwise/depthwise 56.4 k - 3.60 k 705.6 k 1x14x14x144
Relu6 MobilenetV3/expanded_conv_7/depthwise/hard_swish/Relu6 ? - ? ? 1x14x14x144
Conv2D MobilenetV3/expanded_conv_7/squeeze_excite/Conv/Conv2D 184.0 1x1x1x144 5.76 k 5.76 k 1x1x1x40
Relu MobilenetV3/expanded_conv_7/squeeze_excite/Conv/Relu ? - ? ? 1x1x1x40
Conv2D MobilenetV3/expanded_conv_7/squeeze_excite/Conv_1/Conv2D 184.0 1x1x1x40 5.76 k 5.76 k 1x1x1x144
Relu6 MobilenetV3/expanded_conv_7/squeeze_excite/Conv_1/Relu6 ? - ? ? 1x1x1x144
Conv2D MobilenetV3/expanded_conv_7/project/Conv2D 37.6 k 1x14x14x144 6.91 k 1.35 M 1x14x14x48
Conv2D MobilenetV3/expanded_conv_8/expand/Conv2D 65.9 k 1x14x14x48 13.8 k 2.71 M 1x14x14x288
Relu6 MobilenetV3/expanded_conv_8/expand/hard_swish/Relu6 ? - ? ? 1x14x14x288
DepthConv MobilenetV3/expanded_conv_8/depthwise/depthwise 70.6 k - 7.20 k 352.8 k 1x7x7x288
Relu6 MobilenetV3/expanded_conv_8/depthwise/hard_swish/Relu6 ? - ? ? 1x7x7x288
Conv2D MobilenetV3/expanded_conv_8/squeeze_excite/Conv/Conv2D 360.0 1x1x1x288 20.7 k 20.7 k 1x1x1x72
Relu MobilenetV3/expanded_conv_8/squeeze_excite/Conv/Relu ? - ? ? 1x1x1x72
Conv2D MobilenetV3/expanded_conv_8/squeeze_excite/Conv_1/Conv2D 360.0 1x1x1x72 20.7 k 20.7 k 1x1x1x288
Relu6 MobilenetV3/expanded_conv_8/squeeze_excite/Conv_1/Relu6 ? - ? ? 1x1x1x288
Conv2D MobilenetV3/expanded_conv_8/project/Conv2D 18.8 k 1x7x7x288 27.6 k 1.35 M 1x7x7x96
Conv2D MobilenetV3/expanded_conv_9/expand/Conv2D 32.9 k 1x7x7x96 55.3 k 2.71 M 1x7x7x576
Relu6 MobilenetV3/expanded_conv_9/expand/hard_swish/Relu6 ? - ? ? 1x7x7x576
DepthConv MobilenetV3/expanded_conv_9/depthwise/depthwise 56.4 k - 14.4 k 705.6 k 1x7x7x576
Relu6 MobilenetV3/expanded_conv_9/depthwise/hard_swish/Relu6 ? - ? ? 1x7x7x576
Conv2D MobilenetV3/expanded_conv_9/squeeze_excite/Conv/Conv2D 720.0 1x1x1x576 82.9 k 82.9 k 1x1x1x144
Relu MobilenetV3/expanded_conv_9/squeeze_excite/Conv/Relu ? - ? ? 1x1x1x144
Conv2D MobilenetV3/expanded_conv_9/squeeze_excite/Conv_1/Conv2D 720.0 1x1x1x144 82.9 k 82.9 k 1x1x1x576
Relu6 MobilenetV3/expanded_conv_9/squeeze_excite/Conv_1/Relu6 ? - ? ? 1x1x1x576
Conv2D MobilenetV3/expanded_conv_9/project/Conv2D 32.9 k 1x7x7x576 55.3 k 2.71 M 1x7x7x96
Conv2D MobilenetV3/expanded_conv_10/expand/Conv2D 32.9 k 1x7x7x96 55.3 k 2.71 M 1x7x7x576
Relu6 MobilenetV3/expanded_conv_10/expand/hard_swish/Relu6 ? - ? ? 1x7x7x576
DepthConv MobilenetV3/expanded_conv_10/depthwise/depthwise 56.4 k - 14.4 k 705.6 k 1x7x7x576
Relu6 MobilenetV3/expanded_conv_10/depthwise/hard_swish/Relu6 ? - ? ? 1x7x7x576
Conv2D MobilenetV3/expanded_conv_10/squeeze_excite/Conv/Conv2D 720.0 1x1x1x576 82.9 k 82.9 k 1x1x1x144
Relu MobilenetV3/expanded_conv_10/squeeze_excite/Conv/Relu ? - ? ? 1x1x1x144
Conv2D MobilenetV3/expanded_conv_10/squeeze_excite/Conv_1/Conv2D 720.0 1x1x1x144 82.9 k 82.9 k 1x1x1x576
Relu6 MobilenetV3/expanded_conv_10/squeeze_excite/Conv_1/Relu6 ? - ? ? 1x1x1x576
Conv2D MobilenetV3/expanded_conv_10/project/Conv2D 32.9 k 1x7x7x576 55.3 k 2.71 M 1x7x7x96
Conv2D MobilenetV3/Conv_1/Conv2D 32.9 k 1x7x7x96 55.3 k 2.71 M 1x7x7x576
Relu6 MobilenetV3/Conv_1/hard_swish/Relu6 ? - ? ? 1x7x7x576
AvgPool MobilenetV3/AvgPool2D/AvgPool ? 1x7x7x576 ? 28.2 k 1x1x1x576
Conv2D MobilenetV3/Conv_2/Conv2D 1.60 k 1x1x1x576 589.8 k 589.8 k 1x1x1x1024
Relu6 MobilenetV3/Conv_2/hard_swish/Relu6 ? - ? ? 1x1x1x1024
Conv2D MobilenetV3/Logits/Conv2d_1c_1x1/Conv2D 2.02 k 1x1x1x1024 1.03 M 1.03 M 1x1x1x1001
-----
Total Total 2.96 M - 2.53 M 56.5 M -
====================
==== V3 EDGETPU ====
====================
op name ActMem ConvInput ConvParameters Madds OutputTensor
Conv2D MobilenetEdgeTPU/Conv/Conv2D 551.9 k 1x224x224x3 864.0 10.8 M 1x112x112x32
Relu MobilenetEdgeTPU/Conv/Relu ? - ? ? 1x112x112x32
Conv2D MobilenetEdgeTPU/expanded_conv/project/Conv2D 602.1 k 1x112x112x32 512.0 6.42 M 1x112x112x16
Conv2D MobilenetEdgeTPU/expanded_conv_1/expand/Conv2D 602.1 k 1x112x112x16 18.4 k 57.8 M 1x56x56x128
Relu MobilenetEdgeTPU/expanded_conv_1/expand/Relu ? - ? ? 1x56x56x128
Conv2D MobilenetEdgeTPU/expanded_conv_1/project/Conv2D 501.8 k 1x56x56x128 4.10 k 12.8 M 1x56x56x32
Conv2D MobilenetEdgeTPU/expanded_conv_2/expand/Conv2D 501.8 k 1x56x56x32 36.9 k 115.6 M 1x56x56x128
Relu MobilenetEdgeTPU/expanded_conv_2/expand/Relu ? - ? ? 1x56x56x128
Conv2D MobilenetEdgeTPU/expanded_conv_2/project/Conv2D 501.8 k 1x56x56x128 4.10 k 12.8 M 1x56x56x32
Conv2D MobilenetEdgeTPU/expanded_conv_3/expand/Conv2D 501.8 k 1x56x56x32 36.9 k 115.6 M 1x56x56x128
Relu MobilenetEdgeTPU/expanded_conv_3/expand/Relu ? - ? ? 1x56x56x128
Conv2D MobilenetEdgeTPU/expanded_conv_3/project/Conv2D 501.8 k 1x56x56x128 4.10 k 12.8 M 1x56x56x32
Conv2D MobilenetEdgeTPU/expanded_conv_4/expand/Conv2D 501.8 k 1x56x56x32 36.9 k 115.6 M 1x56x56x128
Relu MobilenetEdgeTPU/expanded_conv_4/expand/Relu ? - ? ? 1x56x56x128
Conv2D MobilenetEdgeTPU/expanded_conv_4/project/Conv2D 501.8 k 1x56x56x128 4.10 k 12.8 M 1x56x56x32
Conv2D MobilenetEdgeTPU/expanded_conv_5/expand/Conv2D 301.1 k 1x56x56x32 73.7 k 57.8 M 1x28x28x256
Relu MobilenetEdgeTPU/expanded_conv_5/expand/Relu ? - ? ? 1x28x28x256
Conv2D MobilenetEdgeTPU/expanded_conv_5/project/Conv2D 238.3 k 1x28x28x256 12.3 k 9.63 M 1x28x28x48
Conv2D MobilenetEdgeTPU/expanded_conv_6/expand/Conv2D 188.2 k 1x28x28x48 82.9 k 65.0 M 1x28x28x192
Relu MobilenetEdgeTPU/expanded_conv_6/expand/Relu ? - ? ? 1x28x28x192
Conv2D MobilenetEdgeTPU/expanded_conv_6/project/Conv2D 188.2 k 1x28x28x192 9.22 k 7.23 M 1x28x28x48
Conv2D MobilenetEdgeTPU/expanded_conv_7/expand/Conv2D 188.2 k 1x28x28x48 82.9 k 65.0 M 1x28x28x192
Relu MobilenetEdgeTPU/expanded_conv_7/expand/Relu ? - ? ? 1x28x28x192
Conv2D MobilenetEdgeTPU/expanded_conv_7/project/Conv2D 188.2 k 1x28x28x192 9.22 k 7.23 M 1x28x28x48
Conv2D MobilenetEdgeTPU/expanded_conv_8/expand/Conv2D 188.2 k 1x28x28x48 82.9 k 65.0 M 1x28x28x192
Relu MobilenetEdgeTPU/expanded_conv_8/expand/Relu ? - ? ? 1x28x28x192
Conv2D MobilenetEdgeTPU/expanded_conv_8/project/Conv2D 188.2 k 1x28x28x192 9.22 k 7.23 M 1x28x28x48
Conv2D MobilenetEdgeTPU/expanded_conv_9/expand/Conv2D 338.7 k 1x28x28x48 18.4 k 14.5 M 1x28x28x384
Relu MobilenetEdgeTPU/expanded_conv_9/expand/Relu ? - ? ? 1x28x28x384
DepthConv MobilenetEdgeTPU/expanded_conv_9/depthwise/depthwise 376.3 k - 3.46 k 677.4 k 1x14x14x384
Relu MobilenetEdgeTPU/expanded_conv_9/depthwise/Relu ? - ? ? 1x14x14x384
Conv2D MobilenetEdgeTPU/expanded_conv_9/project/Conv2D 94.1 k 1x14x14x384 36.9 k 7.23 M 1x14x14x96
Conv2D MobilenetEdgeTPU/expanded_conv_10/expand/Conv2D 94.1 k 1x14x14x96 36.9 k 7.23 M 1x14x14x384
Relu MobilenetEdgeTPU/expanded_conv_10/expand/Relu ? - ? ? 1x14x14x384
DepthConv MobilenetEdgeTPU/expanded_conv_10/depthwise/depthwise 150.5 k - 3.46 k 677.4 k 1x14x14x384
Relu MobilenetEdgeTPU/expanded_conv_10/depthwise/Relu ? - ? ? 1x14x14x384
Conv2D MobilenetEdgeTPU/expanded_conv_10/project/Conv2D 94.1 k 1x14x14x384 36.9 k 7.23 M 1x14x14x96
Conv2D MobilenetEdgeTPU/expanded_conv_11/expand/Conv2D 94.1 k 1x14x14x96 36.9 k 7.23 M 1x14x14x384
Relu MobilenetEdgeTPU/expanded_conv_11/expand/Relu ? - ? ? 1x14x14x384
DepthConv MobilenetEdgeTPU/expanded_conv_11/depthwise/depthwise 150.5 k - 3.46 k 677.4 k 1x14x14x384
Relu MobilenetEdgeTPU/expanded_conv_11/depthwise/Relu ? - ? ? 1x14x14x384
Conv2D MobilenetEdgeTPU/expanded_conv_11/project/Conv2D 94.1 k 1x14x14x384 36.9 k 7.23 M 1x14x14x96
Conv2D MobilenetEdgeTPU/expanded_conv_12/expand/Conv2D 94.1 k 1x14x14x96 36.9 k 7.23 M 1x14x14x384
Relu MobilenetEdgeTPU/expanded_conv_12/expand/Relu ? - ? ? 1x14x14x384
DepthConv MobilenetEdgeTPU/expanded_conv_12/depthwise/depthwise 150.5 k - 3.46 k 677.4 k 1x14x14x384
Relu MobilenetEdgeTPU/expanded_conv_12/depthwise/Relu ? - ? ? 1x14x14x384
Conv2D MobilenetEdgeTPU/expanded_conv_12/project/Conv2D 94.1 k 1x14x14x384 36.9 k 7.23 M 1x14x14x96
Conv2D MobilenetEdgeTPU/expanded_conv_13/expand/Conv2D 169.3 k 1x14x14x96 73.7 k 14.5 M 1x14x14x768
Relu MobilenetEdgeTPU/expanded_conv_13/expand/Relu ? - ? ? 1x14x14x768
DepthConv MobilenetEdgeTPU/expanded_conv_13/depthwise/depthwise 301.1 k - 6.91 k 1.35 M 1x14x14x768
Relu MobilenetEdgeTPU/expanded_conv_13/depthwise/Relu ? - ? ? 1x14x14x768
Conv2D MobilenetEdgeTPU/expanded_conv_13/project/Conv2D 169.3 k 1x14x14x768 73.7 k 14.5 M 1x14x14x96
Conv2D MobilenetEdgeTPU/expanded_conv_14/expand/Conv2D 94.1 k 1x14x14x96 36.9 k 7.23 M 1x14x14x384
Relu MobilenetEdgeTPU/expanded_conv_14/expand/Relu ? - ? ? 1x14x14x384
DepthConv MobilenetEdgeTPU/expanded_conv_14/depthwise/depthwise 150.5 k - 3.46 k 677.4 k 1x14x14x384
Relu MobilenetEdgeTPU/expanded_conv_14/depthwise/Relu ? - ? ? 1x14x14x384
Conv2D MobilenetEdgeTPU/expanded_conv_14/project/Conv2D 94.1 k 1x14x14x384 36.9 k 7.23 M 1x14x14x96
Conv2D MobilenetEdgeTPU/expanded_conv_15/expand/Conv2D 94.1 k 1x14x14x96 36.9 k 7.23 M 1x14x14x384
Relu MobilenetEdgeTPU/expanded_conv_15/expand/Relu ? - ? ? 1x14x14x384
DepthConv MobilenetEdgeTPU/expanded_conv_15/depthwise/depthwise 150.5 k - 3.46 k 677.4 k 1x14x14x384
Relu MobilenetEdgeTPU/expanded_conv_15/depthwise/Relu ? - ? ? 1x14x14x384
Conv2D MobilenetEdgeTPU/expanded_conv_15/project/Conv2D 94.1 k 1x14x14x384 36.9 k 7.23 M 1x14x14x96
Conv2D MobilenetEdgeTPU/expanded_conv_16/expand/Conv2D 94.1 k 1x14x14x96 36.9 k 7.23 M 1x14x14x384
Relu MobilenetEdgeTPU/expanded_conv_16/expand/Relu ? - ? ? 1x14x14x384
DepthConv MobilenetEdgeTPU/expanded_conv_16/depthwise/depthwise 150.5 k - 3.46 k 677.4 k 1x14x14x384
Relu MobilenetEdgeTPU/expanded_conv_16/depthwise/Relu ? - ? ? 1x14x14x384
Conv2D MobilenetEdgeTPU/expanded_conv_16/project/Conv2D 94.1 k 1x14x14x384 36.9 k 7.23 M 1x14x14x96
Conv2D MobilenetEdgeTPU/expanded_conv_17/expand/Conv2D 169.3 k 1x14x14x96 73.7 k 14.5 M 1x14x14x768
Relu MobilenetEdgeTPU/expanded_conv_17/expand/Relu ? - ? ? 1x14x14x768
DepthConv MobilenetEdgeTPU/expanded_conv_17/depthwise/depthwise 188.2 k - 19.2 k 940.8 k 1x7x7x768
Relu MobilenetEdgeTPU/expanded_conv_17/depthwise/Relu ? - ? ? 1x7x7x768
Conv2D MobilenetEdgeTPU/expanded_conv_17/project/Conv2D 45.5 k 1x7x7x768 122.9 k 6.02 M 1x7x7x160
Conv2D MobilenetEdgeTPU/expanded_conv_18/expand/Conv2D 39.2 k 1x7x7x160 102.4 k 5.02 M 1x7x7x640
Relu MobilenetEdgeTPU/expanded_conv_18/expand/Relu ? - ? ? 1x7x7x640
DepthConv MobilenetEdgeTPU/expanded_conv_18/depthwise/depthwise 62.7 k - 16.0 k 784.0 k 1x7x7x640
Relu MobilenetEdgeTPU/expanded_conv_18/depthwise/Relu ? - ? ? 1x7x7x640
Conv2D MobilenetEdgeTPU/expanded_conv_18/project/Conv2D 39.2 k 1x7x7x640 102.4 k 5.02 M 1x7x7x160
Conv2D MobilenetEdgeTPU/expanded_conv_19/expand/Conv2D 39.2 k 1x7x7x160 102.4 k 5.02 M 1x7x7x640
Relu MobilenetEdgeTPU/expanded_conv_19/expand/Relu ? - ? ? 1x7x7x640
DepthConv MobilenetEdgeTPU/expanded_conv_19/depthwise/depthwise 62.7 k - 16.0 k 784.0 k 1x7x7x640
Relu MobilenetEdgeTPU/expanded_conv_19/depthwise/Relu ? - ? ? 1x7x7x640
Conv2D MobilenetEdgeTPU/expanded_conv_19/project/Conv2D 39.2 k 1x7x7x640 102.4 k 5.02 M 1x7x7x160
Conv2D MobilenetEdgeTPU/expanded_conv_20/expand/Conv2D 39.2 k 1x7x7x160 102.4 k 5.02 M 1x7x7x640
Relu MobilenetEdgeTPU/expanded_conv_20/expand/Relu ? - ? ? 1x7x7x640
DepthConv MobilenetEdgeTPU/expanded_conv_20/depthwise/depthwise 62.7 k - 16.0 k 784.0 k 1x7x7x640
Relu MobilenetEdgeTPU/expanded_conv_20/depthwise/Relu ? - ? ? 1x7x7x640
Conv2D MobilenetEdgeTPU/expanded_conv_20/project/Conv2D 39.2 k 1x7x7x640 102.4 k 5.02 M 1x7x7x160
Conv2D MobilenetEdgeTPU/expanded_conv_21/expand/Conv2D 70.6 k 1x7x7x160 204.8 k 10.0 M 1x7x7x1280
Relu MobilenetEdgeTPU/expanded_conv_21/expand/Relu ? - ? ? 1x7x7x1280
DepthConv MobilenetEdgeTPU/expanded_conv_21/depthwise/depthwise 125.4 k - 11.5 k 564.5 k 1x7x7x1280
Relu MobilenetEdgeTPU/expanded_conv_21/depthwise/Relu ? - ? ? 1x7x7x1280
Conv2D MobilenetEdgeTPU/expanded_conv_21/project/Conv2D 72.1 k 1x7x7x1280 245.8 k 12.0 M 1x7x7x192
Conv2D MobilenetEdgeTPU/Conv_1/Conv2D 72.1 k 1x7x7x192 245.8 k 12.0 M 1x7x7x1280
Relu MobilenetEdgeTPU/Conv_1/Relu ? - ? ? 1x7x7x1280
AvgPool MobilenetEdgeTPU/Logits/AvgPool2D ? 1x7x7x1280 ? 62.7 k 1x1x1x1280
Conv2D MobilenetEdgeTPU/Logits/Conv2d_1c_1x1/Conv2D 2.28 k 1x1x1x1280 1.28 M 1.28 M 1x1x1x1001
-----
Total Total 11.6 M - 4.05 M 990.7 M -
# pylint: enable=line-too-long
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import functools
import numpy as np
import tensorflow.compat.v1 as tf
import tf_slim as slim
from nets.mobilenet import conv_blocks as ops
from nets.mobilenet import mobilenet as lib
op = lib.op
expand_input = ops.expand_input_by_factor
# Squeeze-and-excite with all parameters filled in; we use a hard sigmoid
# as the gating function and ReLU as the inner activation function.
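# The gating function relu6(x + 3) * 0.16667 below is a hard sigmoid,
# i.e. approximately relu6(x + 3) / 6.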
squeeze_excite = functools.partial(
ops.squeeze_excite, squeeze_factor=4,
inner_activation_fn=tf.nn.relu,
gating_fn=lambda x: tf.nn.relu6(x+3)*0.16667)
# Wrap the squeeze-excite op as an expansion_transform that takes
# both the expansion and the input tensor (the input tensor is unused here).
_se4 = lambda expansion_tensor, input_tensor: squeeze_excite(expansion_tensor)
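# Hard swish activation: hard_swish(x) = x * relu6(x + 3) / 6, a cheap
# piecewise-linear approximation of swish (x * sigmoid(x)).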
def hard_swish(x):
with tf.name_scope('hard_swish'):
return x * tf.nn.relu6(x + np.float32(3)) * np.float32(1. / 6.)
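# Reduces the spatial dimensions to 1x1 by average pooling. When the input
# height and width are statically known and equal, the pooling kernel covers
# the whole feature map; otherwise a default_size x default_size kernel is
# used.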
def reduce_to_1x1(input_tensor, default_size=7, **kwargs):
h, w = input_tensor.shape.as_list()[1:3]
if h is not None and w == h:
k = [h, h]
else:
k = [default_size, default_size]
return slim.avg_pool2d(input_tensor, kernel_size=k, **kwargs)
def mbv3_op(ef, n, k, s=1, act=tf.nn.relu, se=None, **kwargs):
"""Defines a single Mobilenet V3 convolution block.
Args:
ef: expansion factor
n: number of output channels
    k: kernel size of the depthwise convolution
s: stride
act: activation function in inner layers
se: squeeze excite function.
**kwargs: passed to expanded_conv
Returns:
An object (lib._Op) for inserting in conv_def, representing this operation.
"""
return op(
ops.expanded_conv,
expansion_size=expand_input(ef),
kernel_size=(k, k),
stride=s,
num_outputs=n,
inner_activation_fn=act,
expansion_transform=se,
**kwargs)
def mbv3_fused(ef, n, k, s=1, **kwargs):
"""Defines a single Mobilenet V3 convolution block.
Args:
ef: expansion factor
n: number of output channels
    k: kernel size of the fused expansion convolution (no depthwise is used)
s: stride
**kwargs: will be passed to mbv3_op
Returns:
An object (lib._Op) for inserting in conv_def, representing this operation.
"""
expansion_fn = functools.partial(slim.conv2d, kernel_size=k, stride=s)
return mbv3_op(
ef,
n,
k=1,
s=s,
depthwise_location=None,
expansion_fn=expansion_fn,
**kwargs)
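# mbv3_op with a squeeze-excite block (squeeze factor 4) attached as the
# expansion transform.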
mbv3_op_se = functools.partial(mbv3_op, se=_se4)
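# Default keyword arguments, keyed by tuples of ops; they are applied (via
# arg_scope) by the mobilenet library when building a network from a conv_def.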
DEFAULTS = {
(ops.expanded_conv,):
dict(
normalizer_fn=slim.batch_norm,
residual=True),
(slim.conv2d, slim.fully_connected, slim.separable_conv2d): {
'normalizer_fn': slim.batch_norm,
'activation_fn': tf.nn.relu,
},
(slim.batch_norm,): {
'center': True,
'scale': True
},
}
DEFAULTS_GROUP_NORM = {
(ops.expanded_conv,): dict(normalizer_fn=slim.group_norm, residual=True),
(slim.conv2d, slim.fully_connected, slim.separable_conv2d): {
'normalizer_fn': slim.group_norm,
'activation_fn': tf.nn.relu,
},
(slim.group_norm,): {
'groups': 8
},
}
# Compatible checkpoint: http://mldash/5511169891790690458#scalars
V3_LARGE = dict(
defaults=dict(DEFAULTS),
spec=([
# stage 1
op(slim.conv2d, stride=2, num_outputs=16, kernel_size=(3, 3),
activation_fn=hard_swish),
mbv3_op(ef=1, n=16, k=3),
mbv3_op(ef=4, n=24, k=3, s=2),
mbv3_op(ef=3, n=24, k=3, s=1),
mbv3_op_se(ef=3, n=40, k=5, s=2),
mbv3_op_se(ef=3, n=40, k=5, s=1),
mbv3_op_se(ef=3, n=40, k=5, s=1),
mbv3_op(ef=6, n=80, k=3, s=2, act=hard_swish),
mbv3_op(ef=2.5, n=80, k=3, s=1, act=hard_swish),
mbv3_op(ef=184/80., n=80, k=3, s=1, act=hard_swish),
mbv3_op(ef=184/80., n=80, k=3, s=1, act=hard_swish),
mbv3_op_se(ef=6, n=112, k=3, s=1, act=hard_swish),
mbv3_op_se(ef=6, n=112, k=3, s=1, act=hard_swish),
mbv3_op_se(ef=6, n=160, k=5, s=2, act=hard_swish),
mbv3_op_se(ef=6, n=160, k=5, s=1, act=hard_swish),
mbv3_op_se(ef=6, n=160, k=5, s=1, act=hard_swish),
op(slim.conv2d, stride=1, kernel_size=[1, 1], num_outputs=960,
activation_fn=hard_swish),
op(reduce_to_1x1, default_size=7, stride=1, padding='VALID'),
op(slim.conv2d, stride=1, kernel_size=[1, 1], num_outputs=1280,
normalizer_fn=None, activation_fn=hard_swish)
]))
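# Note: fractional expansion factors reproduce exact expansion widths from the
# paper, e.g. ef=184/80 applied to an 80-channel input gives a 184-channel
# expansion (cf. the 1x14x14x184 rows in the table above).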
# 72.2% accuracy.
V3_LARGE_MINIMALISTIC = dict(
defaults=dict(DEFAULTS),
spec=([
# stage 1
op(slim.conv2d, stride=2, num_outputs=16, kernel_size=(3, 3)),
mbv3_op(ef=1, n=16, k=3),
mbv3_op(ef=4, n=24, k=3, s=2),
mbv3_op(ef=3, n=24, k=3, s=1),
mbv3_op(ef=3, n=40, k=3, s=2),
mbv3_op(ef=3, n=40, k=3, s=1),
mbv3_op(ef=3, n=40, k=3, s=1),
mbv3_op(ef=6, n=80, k=3, s=2),
mbv3_op(ef=2.5, n=80, k=3, s=1),
mbv3_op(ef=184 / 80., n=80, k=3, s=1),
mbv3_op(ef=184 / 80., n=80, k=3, s=1),
mbv3_op(ef=6, n=112, k=3, s=1),
mbv3_op(ef=6, n=112, k=3, s=1),
mbv3_op(ef=6, n=160, k=3, s=2),
mbv3_op(ef=6, n=160, k=3, s=1),
mbv3_op(ef=6, n=160, k=3, s=1),
op(slim.conv2d, stride=1, kernel_size=[1, 1], num_outputs=960),
op(reduce_to_1x1, default_size=7, stride=1, padding='VALID'),
op(slim.conv2d,
stride=1,
kernel_size=[1, 1],
num_outputs=1280,
normalizer_fn=None)
]))
# Compatible run: http://mldash/2023283040014348118#scalars
V3_SMALL = dict(
defaults=dict(DEFAULTS),
spec=([
# stage 1
op(slim.conv2d, stride=2, num_outputs=16, kernel_size=(3, 3),
activation_fn=hard_swish),
mbv3_op_se(ef=1, n=16, k=3, s=2),
mbv3_op(ef=72./16, n=24, k=3, s=2),
mbv3_op(ef=(88./24), n=24, k=3, s=1),
mbv3_op_se(ef=4, n=40, k=5, s=2, act=hard_swish),
mbv3_op_se(ef=6, n=40, k=5, s=1, act=hard_swish),
mbv3_op_se(ef=6, n=40, k=5, s=1, act=hard_swish),
mbv3_op_se(ef=3, n=48, k=5, s=1, act=hard_swish),
mbv3_op_se(ef=3, n=48, k=5, s=1, act=hard_swish),
mbv3_op_se(ef=6, n=96, k=5, s=2, act=hard_swish),
mbv3_op_se(ef=6, n=96, k=5, s=1, act=hard_swish),
mbv3_op_se(ef=6, n=96, k=5, s=1, act=hard_swish),
op(slim.conv2d, stride=1, kernel_size=[1, 1], num_outputs=576,
activation_fn=hard_swish),
op(reduce_to_1x1, default_size=7, stride=1, padding='VALID'),
op(slim.conv2d, stride=1, kernel_size=[1, 1], num_outputs=1024,
normalizer_fn=None, activation_fn=hard_swish)
]))
# 62% accuracy.
V3_SMALL_MINIMALISTIC = dict(
defaults=dict(DEFAULTS),
spec=([
# stage 1
op(slim.conv2d, stride=2, num_outputs=16, kernel_size=(3, 3)),
mbv3_op(ef=1, n=16, k=3, s=2),
mbv3_op(ef=72. / 16, n=24, k=3, s=2),
mbv3_op(ef=(88. / 24), n=24, k=3, s=1),
mbv3_op(ef=4, n=40, k=3, s=2),
mbv3_op(ef=6, n=40, k=3, s=1),
mbv3_op(ef=6, n=40, k=3, s=1),
mbv3_op(ef=3, n=48, k=3, s=1),
mbv3_op(ef=3, n=48, k=3, s=1),
mbv3_op(ef=6, n=96, k=3, s=2),
mbv3_op(ef=6, n=96, k=3, s=1),
mbv3_op(ef=6, n=96, k=3, s=1),
op(slim.conv2d, stride=1, kernel_size=[1, 1], num_outputs=576),
op(reduce_to_1x1, default_size=7, stride=1, padding='VALID'),
op(slim.conv2d,
stride=1,
kernel_size=[1, 1],
num_outputs=1024,
normalizer_fn=None)
]))
# EdgeTPU friendly variant of MobilenetV3 that uses fused convolutions
# instead of depthwise in the early layers.
V3_EDGETPU = dict(
defaults=dict(DEFAULTS),
spec=[
op(slim.conv2d, stride=2, num_outputs=32, kernel_size=(3, 3)),
mbv3_fused(k=3, s=1, ef=1, n=16),
mbv3_fused(k=3, s=2, ef=8, n=32),
mbv3_fused(k=3, s=1, ef=4, n=32),
mbv3_fused(k=3, s=1, ef=4, n=32),
mbv3_fused(k=3, s=1, ef=4, n=32),
mbv3_fused(k=3, s=2, ef=8, n=48),
mbv3_fused(k=3, s=1, ef=4, n=48),
mbv3_fused(k=3, s=1, ef=4, n=48),
mbv3_fused(k=3, s=1, ef=4, n=48),
mbv3_op(k=3, s=2, ef=8, n=96),
mbv3_op(k=3, s=1, ef=4, n=96),
mbv3_op(k=3, s=1, ef=4, n=96),
mbv3_op(k=3, s=1, ef=4, n=96),
mbv3_op(k=3, s=1, ef=8, n=96, residual=False),
mbv3_op(k=3, s=1, ef=4, n=96),
mbv3_op(k=3, s=1, ef=4, n=96),
mbv3_op(k=3, s=1, ef=4, n=96),
mbv3_op(k=5, s=2, ef=8, n=160),
mbv3_op(k=5, s=1, ef=4, n=160),
mbv3_op(k=5, s=1, ef=4, n=160),
mbv3_op(k=5, s=1, ef=4, n=160),
mbv3_op(k=3, s=1, ef=8, n=192),
op(slim.conv2d, stride=1, num_outputs=1280, kernel_size=(1, 1)),
])
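# In the fused blocks above, the 1x1 expansion conv is replaced by a full kxk
# conv that also carries the stride, and the depthwise step is dropped.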
@slim.add_arg_scope
def mobilenet(input_tensor,
num_classes=1001,
depth_multiplier=1.0,
scope='MobilenetV3',
conv_defs=None,
finegrain_classification_mode=False,
use_groupnorm=False,
**kwargs):
"""Creates mobilenet V3 network.
  Inference mode is created by default. To create a training network, use
  training_scope below.
with slim.arg_scope(mobilenet_v3.training_scope()):
logits, endpoints = mobilenet_v3.mobilenet(input_tensor)
Args:
input_tensor: The input tensor
num_classes: number of classes
depth_multiplier: The multiplier applied to scale number of
channels in each layer.
scope: Scope of the operator
conv_defs: Which version to create. Could be large/small or
any conv_def (see mobilenet_v3.py for examples).
    finegrain_classification_mode: When set to True, the model
      will keep the last layer large even for small multipliers. Following
      https://arxiv.org/abs/1801.04381, this improves performance on
      ImageNet-type problems.
      *Note*: ignored if final_endpoint makes the builder exit earlier.
use_groupnorm: When set to True, use group_norm as normalizer_fn.
    **kwargs: passed directly to mobilenet.mobilenet:
      prediction_fn: what prediction function to use.
      reuse: whether to reuse variables (if reuse is set to True, scope
        must be given).
Returns:
logits/endpoints pair
Raises:
ValueError: On invalid arguments
"""
if conv_defs is None:
conv_defs = V3_LARGE
if 'multiplier' in kwargs:
    raise ValueError('mobilenetv3 doesn\'t support the generic '
                     '"multiplier" parameter; use "depth_multiplier" instead.')
if use_groupnorm:
conv_defs = copy.deepcopy(conv_defs)
conv_defs['defaults'] = dict(DEFAULTS_GROUP_NORM)
conv_defs['defaults'].update({
(slim.group_norm,): {
'groups': kwargs.pop('groups', 8)
}
})
if finegrain_classification_mode:
conv_defs = copy.deepcopy(conv_defs)
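    # Replacing multiplier_func with an identity on params makes the final
    # conv keep its full width regardless of the depth multiplier.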
conv_defs['spec'][-1] = conv_defs['spec'][-1]._replace(
multiplier_func=lambda params, multiplier: params)
depth_args = {}
with slim.arg_scope((lib.depth_multiplier,), **depth_args):
return lib.mobilenet(
input_tensor,
num_classes=num_classes,
conv_defs=conv_defs,
scope=scope,
multiplier=depth_multiplier,
**kwargs)
mobilenet.default_image_size = 224
training_scope = lib.training_scope
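# Example usage (a minimal sketch; the placeholder shape is illustrative):
#
#   images = tf.placeholder(tf.float32, (1, 224, 224, 3))
#   with slim.arg_scope(training_scope(is_training=False)):
#     logits, endpoints = mobilenet(images, conv_defs=V3_LARGE)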
@slim.add_arg_scope
def mobilenet_base(input_tensor, depth_multiplier=1.0, **kwargs):
"""Creates base of the mobilenet (no pooling and no logits) ."""
return mobilenet(
input_tensor, depth_multiplier=depth_multiplier, base_only=True, **kwargs)
def wrapped_partial(func, new_defaults=None,
**kwargs):
"""Partial function with new default parameters and updated docstring."""
if not new_defaults:
new_defaults = {}
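  # new_defaults are applied first and can be overridden by keyword arguments
  # supplied by the caller at call time.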
def func_wrapper(*f_args, **f_kwargs):
new_kwargs = dict(new_defaults)
new_kwargs.update(f_kwargs)
return func(*f_args, **new_kwargs)
functools.update_wrapper(func_wrapper, func)
partial_func = functools.partial(func_wrapper, **kwargs)
functools.update_wrapper(partial_func, func)
return partial_func
large = wrapped_partial(mobilenet, conv_defs=V3_LARGE)
small = wrapped_partial(mobilenet, conv_defs=V3_SMALL)
edge_tpu = wrapped_partial(mobilenet,
new_defaults={'scope': 'MobilenetEdgeTPU'},
conv_defs=V3_EDGETPU)
edge_tpu_075 = wrapped_partial(
mobilenet,
new_defaults={'scope': 'MobilenetEdgeTPU'},
conv_defs=V3_EDGETPU,
depth_multiplier=0.75,
finegrain_classification_mode=True)
# Minimalistic models that do not have Squeeze-and-Excite blocks,
# hard swish, or 5x5 depthwise convolutions.
# This makes them friendly to a wide range of hardware.
large_minimalistic = wrapped_partial(mobilenet, conv_defs=V3_LARGE_MINIMALISTIC)
small_minimalistic = wrapped_partial(mobilenet, conv_defs=V3_SMALL_MINIMALISTIC)
def _reduce_consecutive_layers(conv_defs, start_id, end_id, multiplier=0.5):
"""Reduce the outputs of consecutive layers with multiplier.
Args:
conv_defs: Mobilenet conv_defs.
start_id: 0-based index of the starting conv_def to be reduced.
end_id: 0-based index of the last conv_def to be reduced.
multiplier: The multiplier by which to reduce the conv_defs.
Returns:
Mobilenet conv_defs where the output sizes from layers [start_id, end_id],
inclusive, are reduced by multiplier.
Raises:
ValueError if any layer to be reduced does not have the 'num_outputs'
attribute.
"""
defs = copy.deepcopy(conv_defs)
for d in defs['spec'][start_id:end_id+1]:
d.params.update({
        'num_outputs': int(np.round(d.params['num_outputs'] * multiplier))
})
return defs
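# Detection-friendly variants: the channel counts of the last few large layers
# are halved (multiplier=0.5) to reduce feature-extractor cost.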
V3_LARGE_DETECTION = _reduce_consecutive_layers(V3_LARGE, 13, 16)
V3_SMALL_DETECTION = _reduce_consecutive_layers(V3_SMALL, 9, 12)
__all__ = ['training_scope', 'mobilenet', 'V3_LARGE', 'V3_SMALL', 'large',
'small', 'V3_LARGE_DETECTION', 'V3_SMALL_DETECTION']
# End of file: slim/nets/mobilenet/mobilenet_v3.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for google3.third_party.tensorflow_models.slim.nets.mobilenet.mobilenet_v3."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from nets.mobilenet import mobilenet_v3
from absl.testing import parameterized
class MobilenetV3Test(tf.test.TestCase, parameterized.TestCase):
  # pylint: disable=g-unreachable-test-method
def assertVariablesHaveNormalizerFn(self, use_groupnorm):
global_variables = [v.name for v in tf.global_variables()]
has_batch_norm = False
has_group_norm = False
for global_variable in global_variables:
if 'BatchNorm' in global_variable:
has_batch_norm = True
if 'GroupNorm' in global_variable:
has_group_norm = True
if use_groupnorm:
self.assertFalse(has_batch_norm)
self.assertTrue(has_group_norm)
else:
self.assertTrue(has_batch_norm)
self.assertFalse(has_group_norm)
@parameterized.named_parameters(('without_groupnorm', False),
('with_groupnorm', True))
def testMobilenetV3Large(self, use_groupnorm):
logits, endpoints = mobilenet_v3.mobilenet(
tf.placeholder(tf.float32, (1, 224, 224, 3)),
use_groupnorm=use_groupnorm)
self.assertEqual(endpoints['layer_19'].shape, [1, 1, 1, 1280])
self.assertEqual(logits.shape, [1, 1001])
self.assertVariablesHaveNormalizerFn(use_groupnorm)
@parameterized.named_parameters(('without_groupnorm', False),
('with_groupnorm', True))
def testMobilenetV3Small(self, use_groupnorm):
_, endpoints = mobilenet_v3.mobilenet(
tf.placeholder(tf.float32, (1, 224, 224, 3)),
conv_defs=mobilenet_v3.V3_SMALL,
use_groupnorm=use_groupnorm)
self.assertEqual(endpoints['layer_15'].shape, [1, 1, 1, 1024])
self.assertVariablesHaveNormalizerFn(use_groupnorm)
@parameterized.named_parameters(('without_groupnorm', False),
('with_groupnorm', True))
def testMobilenetEdgeTpu(self, use_groupnorm):
_, endpoints = mobilenet_v3.edge_tpu(
tf.placeholder(tf.float32, (1, 224, 224, 3)),
use_groupnorm=use_groupnorm)
self.assertIn('Inference mode is created by default',
mobilenet_v3.edge_tpu.__doc__)
self.assertEqual(endpoints['layer_24'].shape, [1, 7, 7, 1280])
self.assertStartsWith(
endpoints['layer_24'].name, 'MobilenetEdgeTPU')
self.assertVariablesHaveNormalizerFn(use_groupnorm)
def testMobilenetEdgeTpuChangeScope(self):
_, endpoints = mobilenet_v3.edge_tpu(
tf.placeholder(tf.float32, (1, 224, 224, 3)), scope='Scope')
self.assertStartsWith(
endpoints['layer_24'].name, 'Scope')
@parameterized.named_parameters(('without_groupnorm', False),
('with_groupnorm', True))
def testMobilenetV3BaseOnly(self, use_groupnorm):
result, endpoints = mobilenet_v3.mobilenet(
tf.placeholder(tf.float32, (1, 224, 224, 3)),
conv_defs=mobilenet_v3.V3_LARGE,
use_groupnorm=use_groupnorm,
base_only=True,
final_endpoint='layer_17')
# Get the latest layer before average pool.
self.assertEqual(endpoints['layer_17'].shape, [1, 7, 7, 960])
self.assertEqual(result, endpoints['layer_17'])
self.assertVariablesHaveNormalizerFn(use_groupnorm)
def testMobilenetV3BaseOnly_VariableInput(self):
result, endpoints = mobilenet_v3.mobilenet(
tf.placeholder(tf.float32, (None, None, None, 3)),
conv_defs=mobilenet_v3.V3_LARGE,
base_only=True,
final_endpoint='layer_17')
# Get the latest layer before average pool.
self.assertEqual(endpoints['layer_17'].shape.as_list(),
[None, None, None, 960])
self.assertEqual(result, endpoints['layer_17'])
# Use reduce mean for pooling and check for operation 'ReduceMean' in graph
@parameterized.named_parameters(('without_groupnorm', False),
('with_groupnorm', True))
def testMobilenetV3WithReduceMean(self, use_groupnorm):
_, _ = mobilenet_v3.mobilenet(
tf.placeholder(tf.float32, (1, 224, 224, 3)),
conv_defs=mobilenet_v3.V3_SMALL,
use_groupnorm=use_groupnorm,
use_reduce_mean_for_pooling=True)
g = tf.get_default_graph()
reduce_mean = [v for v in g.get_operations() if 'ReduceMean' in v.name]
self.assertNotEmpty(reduce_mean)
self.assertVariablesHaveNormalizerFn(use_groupnorm)
@parameterized.named_parameters(('without_groupnorm', False),
('with_groupnorm', True))
def testMobilenetV3WithOutReduceMean(self, use_groupnorm):
_, _ = mobilenet_v3.mobilenet(
tf.placeholder(tf.float32, (1, 224, 224, 3)),
conv_defs=mobilenet_v3.V3_SMALL,
use_groupnorm=use_groupnorm,
use_reduce_mean_for_pooling=False)
g = tf.get_default_graph()
reduce_mean = [v for v in g.get_operations() if 'ReduceMean' in v.name]
self.assertEmpty(reduce_mean)
self.assertVariablesHaveNormalizerFn(use_groupnorm)
if __name__ == '__main__':
# absltest.main()
tf.test.main()
# End of file: slim/nets/mobilenet/mobilenet_v3_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Mobilenet V2.
Architecture: https://arxiv.org/abs/1801.04381
The base model gives 72.2% accuracy on ImageNet, with 300M MAdds and
3.4M parameters.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import functools
import tensorflow.compat.v1 as tf
import tf_slim as slim
from nets.mobilenet import conv_blocks as ops
from nets.mobilenet import mobilenet as lib
op = lib.op
expand_input = ops.expand_input_by_factor
# pyformat: disable
# Architecture: https://arxiv.org/abs/1801.04381
V2_DEF = dict(
defaults={
# Note: these parameters of batch norm affect the architecture
# that's why they are here and not in training_scope.
(slim.batch_norm,): {'center': True, 'scale': True},
(slim.conv2d, slim.fully_connected, slim.separable_conv2d): {
'normalizer_fn': slim.batch_norm, 'activation_fn': tf.nn.relu6
},
(ops.expanded_conv,): {
'expansion_size': expand_input(6),
'split_expansion': 1,
'normalizer_fn': slim.batch_norm,
'residual': True
},
(slim.conv2d, slim.separable_conv2d): {'padding': 'SAME'}
},
spec=[
op(slim.conv2d, stride=2, num_outputs=32, kernel_size=[3, 3]),
op(ops.expanded_conv,
expansion_size=expand_input(1, divisible_by=1),
num_outputs=16),
op(ops.expanded_conv, stride=2, num_outputs=24),
op(ops.expanded_conv, stride=1, num_outputs=24),
op(ops.expanded_conv, stride=2, num_outputs=32),
op(ops.expanded_conv, stride=1, num_outputs=32),
op(ops.expanded_conv, stride=1, num_outputs=32),
op(ops.expanded_conv, stride=2, num_outputs=64),
op(ops.expanded_conv, stride=1, num_outputs=64),
op(ops.expanded_conv, stride=1, num_outputs=64),
op(ops.expanded_conv, stride=1, num_outputs=64),
op(ops.expanded_conv, stride=1, num_outputs=96),
op(ops.expanded_conv, stride=1, num_outputs=96),
op(ops.expanded_conv, stride=1, num_outputs=96),
op(ops.expanded_conv, stride=2, num_outputs=160),
op(ops.expanded_conv, stride=1, num_outputs=160),
op(ops.expanded_conv, stride=1, num_outputs=160),
op(ops.expanded_conv, stride=1, num_outputs=320),
op(slim.conv2d, stride=1, kernel_size=[1, 1], num_outputs=1280)
],
)
# pyformat: enable
# Mobilenet v2 Definition with group normalization.
V2_DEF_GROUP_NORM = copy.deepcopy(V2_DEF)
V2_DEF_GROUP_NORM['defaults'] = {
(slim.conv2d, slim.fully_connected, slim.separable_conv2d): {
'normalizer_fn': slim.group_norm, # pylint: disable=C0330
'activation_fn': tf.nn.relu6, # pylint: disable=C0330
}, # pylint: disable=C0330
(ops.expanded_conv,): {
'expansion_size': ops.expand_input_by_factor(6),
'split_expansion': 1,
'normalizer_fn': slim.group_norm,
'residual': True
},
(slim.conv2d, slim.separable_conv2d): {
'padding': 'SAME'
}
}
@slim.add_arg_scope
def mobilenet(input_tensor,
num_classes=1001,
depth_multiplier=1.0,
scope='MobilenetV2',
conv_defs=None,
finegrain_classification_mode=False,
min_depth=None,
divisible_by=None,
activation_fn=None,
**kwargs):
"""Creates mobilenet V2 network.
  Inference mode is created by default. To create a training network, use
  training_scope below.
with slim.arg_scope(mobilenet_v2.training_scope()):
logits, endpoints = mobilenet_v2.mobilenet(input_tensor)
Args:
input_tensor: The input tensor
num_classes: number of classes
depth_multiplier: The multiplier applied to scale number of
channels in each layer.
scope: Scope of the operator
conv_defs: Allows to override default conv def.
    finegrain_classification_mode: When set to True, the model
      will keep the last layer large even for small multipliers. Following
      https://arxiv.org/abs/1801.04381, this improves performance on
      ImageNet-type problems.
      *Note*: ignored if final_endpoint makes the builder exit earlier.
min_depth: If provided, will ensure that all layers will have that
many channels after application of depth multiplier.
divisible_by: If provided will ensure that all layers # channels
will be divisible by this number.
activation_fn: Activation function to use, defaults to tf.nn.relu6 if not
specified.
    **kwargs: passed directly to mobilenet.mobilenet:
      prediction_fn: what prediction function to use.
      reuse: whether to reuse variables (if reuse is set to True, scope
        must be given).
Returns:
logits/endpoints pair
Raises:
ValueError: On invalid arguments
"""
if conv_defs is None:
conv_defs = V2_DEF
if 'multiplier' in kwargs:
    raise ValueError('mobilenetv2 doesn\'t support the generic '
                     '"multiplier" parameter; use "depth_multiplier" instead.')
if finegrain_classification_mode:
conv_defs = copy.deepcopy(conv_defs)
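    # When the multiplier is < 1, dividing num_outputs here cancels the
    # multiplication applied later, so the last layer keeps its full width.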
if depth_multiplier < 1:
conv_defs['spec'][-1].params['num_outputs'] /= depth_multiplier
if activation_fn:
conv_defs = copy.deepcopy(conv_defs)
defaults = conv_defs['defaults']
conv_defaults = (
defaults[(slim.conv2d, slim.fully_connected, slim.separable_conv2d)])
conv_defaults['activation_fn'] = activation_fn
depth_args = {}
  # NB: do not set depth_args unless they are provided, to avoid overriding
  # whatever defaults depth_multiplier might have picked up through arg_scope.
if min_depth is not None:
depth_args['min_depth'] = min_depth
if divisible_by is not None:
depth_args['divisible_by'] = divisible_by
with slim.arg_scope((lib.depth_multiplier,), **depth_args):
return lib.mobilenet(
input_tensor,
num_classes=num_classes,
conv_defs=conv_defs,
scope=scope,
multiplier=depth_multiplier,
**kwargs)
mobilenet.default_image_size = 224
def wrapped_partial(func, *args, **kwargs):
partial_func = functools.partial(func, *args, **kwargs)
functools.update_wrapper(partial_func, func)
return partial_func
# Wrappers for mobilenet v2 with depth multipliers. Note that
# 'finegrain_classification_mode' is set to True, which means the embedding
# layer will not be shrunk when given a depth multiplier < 1.0.
mobilenet_v2_140 = wrapped_partial(mobilenet, depth_multiplier=1.4)
mobilenet_v2_050 = wrapped_partial(mobilenet, depth_multiplier=0.50,
finegrain_classification_mode=True)
mobilenet_v2_035 = wrapped_partial(mobilenet, depth_multiplier=0.35,
finegrain_classification_mode=True)
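# Example usage (a minimal sketch; the placeholder shape is illustrative):
#
#   images = tf.placeholder(tf.float32, (1, 224, 224, 3))
#   with slim.arg_scope(training_scope(is_training=False)):
#     logits, endpoints = mobilenet_v2_050(images)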
@slim.add_arg_scope
def mobilenet_base(input_tensor, depth_multiplier=1.0, **kwargs):
"""Creates base of the mobilenet (no pooling and no logits) ."""
return mobilenet(input_tensor,
depth_multiplier=depth_multiplier,
base_only=True, **kwargs)
@slim.add_arg_scope
def mobilenet_base_group_norm(input_tensor, depth_multiplier=1.0, **kwargs):
"""Creates base of the mobilenet (no pooling and no logits) ."""
kwargs['conv_defs'] = V2_DEF_GROUP_NORM
kwargs['conv_defs']['defaults'].update({
(slim.group_norm,): {
'groups': kwargs.pop('groups', 8)
}
})
return mobilenet(
input_tensor, depth_multiplier=depth_multiplier, base_only=True, **kwargs)
def training_scope(**kwargs):
"""Defines MobilenetV2 training scope.
Usage:
with slim.arg_scope(mobilenet_v2.training_scope()):
logits, endpoints = mobilenet_v2.mobilenet(input_tensor)
Args:
**kwargs: Passed to mobilenet.training_scope. The following parameters
are supported:
weight_decay- The weight decay to use for regularizing the model.
stddev- Standard deviation for initialization, if negative uses xavier.
dropout_keep_prob- dropout keep probability
bn_decay- decay for the batch norm moving averages.
Returns:
An `arg_scope` to use for the mobilenet v2 model.
"""
return lib.training_scope(**kwargs)
__all__ = ['training_scope', 'mobilenet_base', 'mobilenet', 'V2_DEF']
# End of file: slim/nets/mobilenet/mobilenet_v2.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.pnasnet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import tf_slim as slim
from nets.nasnet import pnasnet
class PNASNetTest(tf.test.TestCase):
def testBuildLogitsLargeModel(self):
batch_size = 5
height, width = 331, 331
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
logits, end_points = pnasnet.build_pnasnet_large(inputs, num_classes)
auxlogits = end_points['AuxLogits']
predictions = end_points['Predictions']
self.assertListEqual(auxlogits.get_shape().as_list(),
[batch_size, num_classes])
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertListEqual(predictions.get_shape().as_list(),
[batch_size, num_classes])
def testBuildLogitsMobileModel(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
logits, end_points = pnasnet.build_pnasnet_mobile(inputs, num_classes)
auxlogits = end_points['AuxLogits']
predictions = end_points['Predictions']
self.assertListEqual(auxlogits.get_shape().as_list(),
[batch_size, num_classes])
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertListEqual(predictions.get_shape().as_list(),
[batch_size, num_classes])
def testBuildNonExistingLayerLargeModel(self):
"""Tests that the model is built correctly without unnecessary layers."""
inputs = tf.random.uniform((5, 331, 331, 3))
tf.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
pnasnet.build_pnasnet_large(inputs, 1000)
vars_names = [x.op.name for x in tf.trainable_variables()]
self.assertIn('cell_stem_0/1x1/weights', vars_names)
self.assertNotIn('cell_stem_1/comb_iter_0/right/1x1/weights', vars_names)
def testBuildNonExistingLayerMobileModel(self):
"""Tests that the model is built correctly without unnecessary layers."""
inputs = tf.random.uniform((5, 224, 224, 3))
tf.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
pnasnet.build_pnasnet_mobile(inputs, 1000)
vars_names = [x.op.name for x in tf.trainable_variables()]
self.assertIn('cell_stem_0/1x1/weights', vars_names)
self.assertNotIn('cell_stem_1/comb_iter_0/right/1x1/weights', vars_names)
def testBuildPreLogitsLargeModel(self):
batch_size = 5
height, width = 331, 331
num_classes = None
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
net, end_points = pnasnet.build_pnasnet_large(inputs, num_classes)
self.assertFalse('AuxLogits' in end_points)
self.assertFalse('Predictions' in end_points)
self.assertTrue(net.op.name.startswith('final_layer/Mean'))
self.assertListEqual(net.get_shape().as_list(), [batch_size, 4320])
def testBuildPreLogitsMobileModel(self):
batch_size = 5
height, width = 224, 224
num_classes = None
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
net, end_points = pnasnet.build_pnasnet_mobile(inputs, num_classes)
self.assertFalse('AuxLogits' in end_points)
self.assertFalse('Predictions' in end_points)
self.assertTrue(net.op.name.startswith('final_layer/Mean'))
self.assertListEqual(net.get_shape().as_list(), [batch_size, 1080])
def testAllEndPointsShapesLargeModel(self):
batch_size = 5
height, width = 331, 331
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
_, end_points = pnasnet.build_pnasnet_large(inputs, num_classes)
endpoints_shapes = {'Stem': [batch_size, 42, 42, 540],
'Cell_0': [batch_size, 42, 42, 1080],
'Cell_1': [batch_size, 42, 42, 1080],
'Cell_2': [batch_size, 42, 42, 1080],
'Cell_3': [batch_size, 42, 42, 1080],
'Cell_4': [batch_size, 21, 21, 2160],
'Cell_5': [batch_size, 21, 21, 2160],
'Cell_6': [batch_size, 21, 21, 2160],
'Cell_7': [batch_size, 21, 21, 2160],
'Cell_8': [batch_size, 11, 11, 4320],
'Cell_9': [batch_size, 11, 11, 4320],
'Cell_10': [batch_size, 11, 11, 4320],
'Cell_11': [batch_size, 11, 11, 4320],
'global_pool': [batch_size, 4320],
# Logits and predictions
'AuxLogits': [batch_size, 1000],
'Predictions': [batch_size, 1000],
'Logits': [batch_size, 1000],
}
self.assertEqual(len(end_points), 17)
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes:
tf.logging.info('Endpoint name: {}'.format(endpoint_name))
expected_shape = endpoints_shapes[endpoint_name]
self.assertIn(endpoint_name, end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testAllEndPointsShapesMobileModel(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
_, end_points = pnasnet.build_pnasnet_mobile(inputs, num_classes)
endpoints_shapes = {
'Stem': [batch_size, 28, 28, 135],
'Cell_0': [batch_size, 28, 28, 270],
'Cell_1': [batch_size, 28, 28, 270],
'Cell_2': [batch_size, 28, 28, 270],
'Cell_3': [batch_size, 14, 14, 540],
'Cell_4': [batch_size, 14, 14, 540],
'Cell_5': [batch_size, 14, 14, 540],
'Cell_6': [batch_size, 7, 7, 1080],
'Cell_7': [batch_size, 7, 7, 1080],
'Cell_8': [batch_size, 7, 7, 1080],
'global_pool': [batch_size, 1080],
# Logits and predictions
'AuxLogits': [batch_size, num_classes],
'Predictions': [batch_size, num_classes],
'Logits': [batch_size, num_classes],
}
self.assertEqual(len(end_points), 14)
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes:
tf.logging.info('Endpoint name: {}'.format(endpoint_name))
expected_shape = endpoints_shapes[endpoint_name]
self.assertIn(endpoint_name, end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testNoAuxHeadLargeModel(self):
batch_size = 5
height, width = 331, 331
num_classes = 1000
for use_aux_head in (True, False):
tf.reset_default_graph()
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step()
config = pnasnet.large_imagenet_config()
config.set_hparam('use_aux_head', int(use_aux_head))
with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
_, end_points = pnasnet.build_pnasnet_large(inputs, num_classes,
config=config)
self.assertEqual('AuxLogits' in end_points, use_aux_head)
def testNoAuxHeadMobileModel(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
for use_aux_head in (True, False):
tf.reset_default_graph()
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step()
config = pnasnet.mobile_imagenet_config()
config.set_hparam('use_aux_head', int(use_aux_head))
with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
_, end_points = pnasnet.build_pnasnet_mobile(
inputs, num_classes, config=config)
self.assertEqual('AuxLogits' in end_points, use_aux_head)
def testOverrideHParamsLargeModel(self):
batch_size = 5
height, width = 331, 331
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step()
config = pnasnet.large_imagenet_config()
config.set_hparam('data_format', 'NCHW')
with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
_, end_points = pnasnet.build_pnasnet_large(
inputs, num_classes, config=config)
self.assertListEqual(
end_points['Stem'].shape.as_list(), [batch_size, 540, 42, 42])
def testOverrideHParamsMobileModel(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step()
config = pnasnet.mobile_imagenet_config()
config.set_hparam('data_format', 'NCHW')
with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
_, end_points = pnasnet.build_pnasnet_mobile(
inputs, num_classes, config=config)
self.assertListEqual(end_points['Stem'].shape.as_list(),
[batch_size, 135, 28, 28])
  def testUseBoundedActivationMobileModel(self):
batch_size = 1
height, width = 224, 224
num_classes = 1000
for use_bounded_activation in (True, False):
tf.reset_default_graph()
inputs = tf.random.uniform((batch_size, height, width, 3))
config = pnasnet.mobile_imagenet_config()
config.set_hparam('use_bounded_activation', use_bounded_activation)
with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
_, _ = pnasnet.build_pnasnet_mobile(
inputs, num_classes, config=config)
for node in tf.get_default_graph().as_graph_def().node:
if node.op.startswith('Relu'):
self.assertEqual(node.op == 'Relu6', use_bounded_activation)
if __name__ == '__main__':
tf.test.main()
# ==== End of slim/nets/nasnet/pnasnet_test.py ====
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition for the NASNet classification networks.
Paper: https://arxiv.org/abs/1707.07012
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import tensorflow.compat.v1 as tf
import tf_slim as slim
from tensorflow.contrib import training as contrib_training
from nets.nasnet import nasnet_utils
arg_scope = slim.arg_scope
# Notes for training NASNet Cifar Model
# -------------------------------------
# batch_size: 32
# learning rate: 0.025
# cosine (single period) learning rate decay
# auxiliary head loss weighting: 0.4
# clip global norm of all gradients by 5
def cifar_config():
return contrib_training.HParams(
stem_multiplier=3.0,
drop_path_keep_prob=0.6,
num_cells=18,
use_aux_head=1,
num_conv_filters=32,
dense_dropout_keep_prob=1.0,
filter_scaling_rate=2.0,
num_reduction_layers=2,
data_format='NHWC',
skip_reduction_layer_input=0,
# 600 epochs with a batch size of 32
      # This is used for the drop path probabilities, since the drop path
      # dropout probability needs to increase over the course of training.
total_training_steps=937500,
use_bounded_activation=False,
)
# Notes for training large NASNet model on ImageNet
# -------------------------------------
# batch size (per replica): 16
# learning rate: 0.015 * 100
# learning rate decay factor: 0.97
# num epochs per decay: 2.4
# sync sgd with 100 replicas
# auxiliary head loss weighting: 0.4
# label smoothing: 0.1
# clip global norm of all gradients by 10
def large_imagenet_config():
return contrib_training.HParams(
stem_multiplier=3.0,
dense_dropout_keep_prob=0.5,
num_cells=18,
filter_scaling_rate=2.0,
num_conv_filters=168,
drop_path_keep_prob=0.7,
use_aux_head=1,
num_reduction_layers=2,
data_format='NHWC',
skip_reduction_layer_input=1,
total_training_steps=250000,
use_bounded_activation=False,
)
# Notes for training the mobile NASNet ImageNet model
# -------------------------------------
# batch size (per replica): 32
# learning rate: 0.04 * 50
# learning rate decay factor: 0.97
# num epochs per decay: 2.4
# sync sgd with 50 replicas
# auxiliary head weighting: 0.4
# label smoothing: 0.1
# clip global norm of all gradients by 10
def mobile_imagenet_config():
return contrib_training.HParams(
stem_multiplier=1.0,
dense_dropout_keep_prob=0.5,
num_cells=12,
filter_scaling_rate=2.0,
drop_path_keep_prob=1.0,
num_conv_filters=44,
use_aux_head=1,
num_reduction_layers=2,
data_format='NHWC',
skip_reduction_layer_input=0,
total_training_steps=250000,
use_bounded_activation=False,
)
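# Example (a minimal sketch): the HParams objects returned above can be
# overridden via set_hparam() before being passed to the build functions
# defined below, e.g.:
#
#   config = mobile_imagenet_config()
#   config.set_hparam('data_format', 'NCHW')
#   config.set_hparam('use_aux_head', 0)
#   logits, end_points = build_nasnet_mobile(images, 1001, config=config)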
def _update_hparams(hparams, is_training):
"""Update hparams for given is_training option."""
if not is_training:
hparams.set_hparam('drop_path_keep_prob', 1.0)
def nasnet_cifar_arg_scope(weight_decay=5e-4,
batch_norm_decay=0.9,
batch_norm_epsilon=1e-5):
"""Defines the default arg scope for the NASNet-A Cifar model.
Args:
weight_decay: The weight decay to use for regularizing the model.
batch_norm_decay: Decay for batch norm moving average.
batch_norm_epsilon: Small float added to variance to avoid dividing by zero
in batch norm.
Returns:
An `arg_scope` to use for the NASNet Cifar Model.
"""
batch_norm_params = {
# Decay for the moving averages.
'decay': batch_norm_decay,
# epsilon to prevent 0s in variance.
'epsilon': batch_norm_epsilon,
'scale': True,
'fused': True,
}
weights_regularizer = slim.l2_regularizer(weight_decay)
weights_initializer = slim.variance_scaling_initializer(mode='FAN_OUT')
with arg_scope([slim.fully_connected, slim.conv2d, slim.separable_conv2d],
weights_regularizer=weights_regularizer,
weights_initializer=weights_initializer):
with arg_scope([slim.fully_connected],
activation_fn=None, scope='FC'):
with arg_scope([slim.conv2d, slim.separable_conv2d],
activation_fn=None, biases_initializer=None):
with arg_scope([slim.batch_norm], **batch_norm_params) as sc:
return sc
def nasnet_mobile_arg_scope(weight_decay=4e-5,
batch_norm_decay=0.9997,
batch_norm_epsilon=1e-3):
"""Defines the default arg scope for the NASNet-A Mobile ImageNet model.
Args:
weight_decay: The weight decay to use for regularizing the model.
batch_norm_decay: Decay for batch norm moving average.
batch_norm_epsilon: Small float added to variance to avoid dividing by zero
in batch norm.
Returns:
An `arg_scope` to use for the NASNet Mobile Model.
"""
batch_norm_params = {
# Decay for the moving averages.
'decay': batch_norm_decay,
# epsilon to prevent 0s in variance.
'epsilon': batch_norm_epsilon,
'scale': True,
'fused': True,
}
weights_regularizer = slim.l2_regularizer(weight_decay)
weights_initializer = slim.variance_scaling_initializer(mode='FAN_OUT')
with arg_scope([slim.fully_connected, slim.conv2d, slim.separable_conv2d],
weights_regularizer=weights_regularizer,
weights_initializer=weights_initializer):
with arg_scope([slim.fully_connected],
activation_fn=None, scope='FC'):
with arg_scope([slim.conv2d, slim.separable_conv2d],
activation_fn=None, biases_initializer=None):
with arg_scope([slim.batch_norm], **batch_norm_params) as sc:
return sc
def nasnet_large_arg_scope(weight_decay=5e-5,
batch_norm_decay=0.9997,
batch_norm_epsilon=1e-3):
"""Defines the default arg scope for the NASNet-A Large ImageNet model.
Args:
weight_decay: The weight decay to use for regularizing the model.
batch_norm_decay: Decay for batch norm moving average.
batch_norm_epsilon: Small float added to variance to avoid dividing by zero
in batch norm.
Returns:
An `arg_scope` to use for the NASNet Large Model.
"""
batch_norm_params = {
# Decay for the moving averages.
'decay': batch_norm_decay,
# epsilon to prevent 0s in variance.
'epsilon': batch_norm_epsilon,
'scale': True,
'fused': True,
}
weights_regularizer = slim.l2_regularizer(weight_decay)
weights_initializer = slim.variance_scaling_initializer(mode='FAN_OUT')
with arg_scope([slim.fully_connected, slim.conv2d, slim.separable_conv2d],
weights_regularizer=weights_regularizer,
weights_initializer=weights_initializer):
with arg_scope([slim.fully_connected],
activation_fn=None, scope='FC'):
with arg_scope([slim.conv2d, slim.separable_conv2d],
activation_fn=None, biases_initializer=None):
with arg_scope([slim.batch_norm], **batch_norm_params) as sc:
return sc
def _build_aux_head(net, end_points, num_classes, hparams, scope):
"""Auxiliary head used for all models across all datasets."""
activation_fn = tf.nn.relu6 if hparams.use_bounded_activation else tf.nn.relu
with tf.variable_scope(scope):
aux_logits = tf.identity(net)
with tf.variable_scope('aux_logits'):
aux_logits = slim.avg_pool2d(
aux_logits, [5, 5], stride=3, padding='VALID')
aux_logits = slim.conv2d(aux_logits, 128, [1, 1], scope='proj')
aux_logits = slim.batch_norm(aux_logits, scope='aux_bn0')
aux_logits = activation_fn(aux_logits)
# Shape of feature map before the final layer.
shape = aux_logits.shape
if hparams.data_format == 'NHWC':
shape = shape[1:3]
else:
shape = shape[2:4]
aux_logits = slim.conv2d(aux_logits, 768, shape, padding='VALID')
aux_logits = slim.batch_norm(aux_logits, scope='aux_bn1')
aux_logits = activation_fn(aux_logits)
aux_logits = slim.flatten(aux_logits)
aux_logits = slim.fully_connected(aux_logits, num_classes)
end_points['AuxLogits'] = aux_logits
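# Worked shape example for the aux head above (mobile ImageNet setting): it is
# attached to the Cell_7 output of shape [N, 14, 14, 528]; the 5x5/stride-3
# VALID average pool gives [N, 4, 4, 528], the 1x1 projection gives
# [N, 4, 4, 128], the full-spatial conv gives [N, 1, 1, 768], and the final
# fully connected layer produces the [N, num_classes] 'AuxLogits' endpoint.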
def _imagenet_stem(inputs, hparams, stem_cell, current_step=None):
"""Stem used for models trained on ImageNet."""
num_stem_cells = 2
# 149 x 149 x 32
num_stem_filters = int(32 * hparams.stem_multiplier)
net = slim.conv2d(
inputs, num_stem_filters, [3, 3], stride=2, scope='conv0',
padding='VALID')
net = slim.batch_norm(net, scope='conv0_bn')
# Run the reduction cells
cell_outputs = [None, net]
filter_scaling = 1.0 / (hparams.filter_scaling_rate**num_stem_cells)
for cell_num in range(num_stem_cells):
net = stem_cell(
net,
scope='cell_stem_{}'.format(cell_num),
filter_scaling=filter_scaling,
stride=2,
prev_layer=cell_outputs[-2],
cell_num=cell_num,
current_step=current_step)
cell_outputs.append(net)
filter_scaling *= hparams.filter_scaling_rate
return net, cell_outputs
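# Worked shape example for the stem above (the "149 x 149 x 32" comment
# corresponds to a 299 x 299 input): for the mobile ImageNet model a
# 224 x 224 input gives a 111 x 111 conv0 output (3x3, stride 2, VALID), and
# the two stride-2 stem cells reduce it to 56 x 56 and then 28 x 28, matching
# the 'Stem' endpoint shape [batch, 28, 28, 88] checked in the unit tests.
# The large model (331 x 331 inputs) ends its stem at 42 x 42.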
def _cifar_stem(inputs, hparams):
"""Stem used for models trained on Cifar."""
num_stem_filters = int(hparams.num_conv_filters * hparams.stem_multiplier)
net = slim.conv2d(
inputs,
num_stem_filters,
3,
scope='l1_stem_3x3')
net = slim.batch_norm(net, scope='l1_stem_bn')
return net, [None, net]
def build_nasnet_cifar(images, num_classes,
is_training=True,
config=None,
current_step=None):
"""Build NASNet model for the Cifar Dataset."""
hparams = cifar_config() if config is None else copy.deepcopy(config)
_update_hparams(hparams, is_training)
if tf.test.is_gpu_available() and hparams.data_format == 'NHWC':
tf.logging.info(
'A GPU is available on the machine, consider using NCHW '
'data format for increased speed on GPU.')
if hparams.data_format == 'NCHW':
images = tf.transpose(a=images, perm=[0, 3, 1, 2])
# Calculate the total number of cells in the network
# Add 2 for the reduction cells
total_num_cells = hparams.num_cells + 2
normal_cell = nasnet_utils.NasNetANormalCell(
hparams.num_conv_filters, hparams.drop_path_keep_prob,
total_num_cells, hparams.total_training_steps,
hparams.use_bounded_activation)
reduction_cell = nasnet_utils.NasNetAReductionCell(
hparams.num_conv_filters, hparams.drop_path_keep_prob,
total_num_cells, hparams.total_training_steps,
hparams.use_bounded_activation)
with arg_scope([slim.dropout, nasnet_utils.drop_path, slim.batch_norm],
is_training=is_training):
with arg_scope([slim.avg_pool2d,
slim.max_pool2d,
slim.conv2d,
slim.batch_norm,
slim.separable_conv2d,
nasnet_utils.factorized_reduction,
nasnet_utils.global_avg_pool,
nasnet_utils.get_channel_index,
nasnet_utils.get_channel_dim],
data_format=hparams.data_format):
return _build_nasnet_base(images,
normal_cell=normal_cell,
reduction_cell=reduction_cell,
num_classes=num_classes,
hparams=hparams,
is_training=is_training,
stem_type='cifar',
current_step=current_step)
build_nasnet_cifar.default_image_size = 32
def build_nasnet_mobile(images, num_classes,
is_training=True,
final_endpoint=None,
config=None,
current_step=None):
"""Build NASNet Mobile model for the ImageNet Dataset."""
hparams = (mobile_imagenet_config() if config is None
else copy.deepcopy(config))
_update_hparams(hparams, is_training)
if tf.test.is_gpu_available() and hparams.data_format == 'NHWC':
tf.logging.info(
'A GPU is available on the machine, consider using NCHW '
'data format for increased speed on GPU.')
if hparams.data_format == 'NCHW':
images = tf.transpose(a=images, perm=[0, 3, 1, 2])
# Calculate the total number of cells in the network
# Add 2 for the reduction cells
total_num_cells = hparams.num_cells + 2
# If ImageNet, then add an additional two for the stem cells
total_num_cells += 2
normal_cell = nasnet_utils.NasNetANormalCell(
hparams.num_conv_filters, hparams.drop_path_keep_prob,
total_num_cells, hparams.total_training_steps,
hparams.use_bounded_activation)
reduction_cell = nasnet_utils.NasNetAReductionCell(
hparams.num_conv_filters, hparams.drop_path_keep_prob,
total_num_cells, hparams.total_training_steps,
hparams.use_bounded_activation)
with arg_scope([slim.dropout, nasnet_utils.drop_path, slim.batch_norm],
is_training=is_training):
with arg_scope([slim.avg_pool2d,
slim.max_pool2d,
slim.conv2d,
slim.batch_norm,
slim.separable_conv2d,
nasnet_utils.factorized_reduction,
nasnet_utils.global_avg_pool,
nasnet_utils.get_channel_index,
nasnet_utils.get_channel_dim],
data_format=hparams.data_format):
return _build_nasnet_base(images,
normal_cell=normal_cell,
reduction_cell=reduction_cell,
num_classes=num_classes,
hparams=hparams,
is_training=is_training,
stem_type='imagenet',
final_endpoint=final_endpoint,
current_step=current_step)
build_nasnet_mobile.default_image_size = 224
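# Minimal usage sketch for the mobile model (assumes `images` is a float32
# tensor of shape [batch, 224, 224, 3]); the cifar and large variants are used
# the same way with their own arg scopes:
#
#   with slim.arg_scope(nasnet_mobile_arg_scope()):
#     logits, end_points = build_nasnet_mobile(images, num_classes=1001,
#                                              is_training=False)
#   probabilities = end_points['Predictions']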
def build_nasnet_large(images, num_classes,
is_training=True,
final_endpoint=None,
config=None,
current_step=None):
"""Build NASNet Large model for the ImageNet Dataset."""
hparams = (large_imagenet_config() if config is None
else copy.deepcopy(config))
_update_hparams(hparams, is_training)
if tf.test.is_gpu_available() and hparams.data_format == 'NHWC':
tf.logging.info(
'A GPU is available on the machine, consider using NCHW '
'data format for increased speed on GPU.')
if hparams.data_format == 'NCHW':
images = tf.transpose(a=images, perm=[0, 3, 1, 2])
# Calculate the total number of cells in the network
# Add 2 for the reduction cells
total_num_cells = hparams.num_cells + 2
# If ImageNet, then add an additional two for the stem cells
total_num_cells += 2
normal_cell = nasnet_utils.NasNetANormalCell(
hparams.num_conv_filters, hparams.drop_path_keep_prob,
total_num_cells, hparams.total_training_steps,
hparams.use_bounded_activation)
reduction_cell = nasnet_utils.NasNetAReductionCell(
hparams.num_conv_filters, hparams.drop_path_keep_prob,
total_num_cells, hparams.total_training_steps,
hparams.use_bounded_activation)
with arg_scope([slim.dropout, nasnet_utils.drop_path, slim.batch_norm],
is_training=is_training):
with arg_scope([slim.avg_pool2d,
slim.max_pool2d,
slim.conv2d,
slim.batch_norm,
slim.separable_conv2d,
nasnet_utils.factorized_reduction,
nasnet_utils.global_avg_pool,
nasnet_utils.get_channel_index,
nasnet_utils.get_channel_dim],
data_format=hparams.data_format):
return _build_nasnet_base(images,
normal_cell=normal_cell,
reduction_cell=reduction_cell,
num_classes=num_classes,
hparams=hparams,
is_training=is_training,
stem_type='imagenet',
final_endpoint=final_endpoint,
current_step=current_step)
build_nasnet_large.default_image_size = 331
def _build_nasnet_base(images,
normal_cell,
reduction_cell,
num_classes,
hparams,
is_training,
stem_type,
final_endpoint=None,
current_step=None):
"""Constructs a NASNet image model."""
end_points = {}
def add_and_check_endpoint(endpoint_name, net):
end_points[endpoint_name] = net
return final_endpoint and (endpoint_name == final_endpoint)
# Find where to place the reduction cells or stride normal cells
reduction_indices = nasnet_utils.calc_reduction_layers(
hparams.num_cells, hparams.num_reduction_layers)
stem_cell = reduction_cell
if stem_type == 'imagenet':
stem = lambda: _imagenet_stem(images, hparams, stem_cell)
elif stem_type == 'cifar':
stem = lambda: _cifar_stem(images, hparams)
else:
raise ValueError('Unknown stem_type: ', stem_type)
net, cell_outputs = stem()
if add_and_check_endpoint('Stem', net): return net, end_points
# Setup for building in the auxiliary head.
aux_head_cell_idxes = []
if len(reduction_indices) >= 2:
aux_head_cell_idxes.append(reduction_indices[1] - 1)
# Run the cells
filter_scaling = 1.0
# true_cell_num accounts for the stem cells
true_cell_num = 2 if stem_type == 'imagenet' else 0
activation_fn = tf.nn.relu6 if hparams.use_bounded_activation else tf.nn.relu
for cell_num in range(hparams.num_cells):
stride = 1
if hparams.skip_reduction_layer_input:
prev_layer = cell_outputs[-2]
if cell_num in reduction_indices:
filter_scaling *= hparams.filter_scaling_rate
net = reduction_cell(
net,
scope='reduction_cell_{}'.format(reduction_indices.index(cell_num)),
filter_scaling=filter_scaling,
stride=2,
prev_layer=cell_outputs[-2],
cell_num=true_cell_num,
current_step=current_step)
if add_and_check_endpoint(
'Reduction_Cell_{}'.format(reduction_indices.index(cell_num)), net):
return net, end_points
true_cell_num += 1
cell_outputs.append(net)
if not hparams.skip_reduction_layer_input:
prev_layer = cell_outputs[-2]
net = normal_cell(
net,
scope='cell_{}'.format(cell_num),
filter_scaling=filter_scaling,
stride=stride,
prev_layer=prev_layer,
cell_num=true_cell_num,
current_step=current_step)
if add_and_check_endpoint('Cell_{}'.format(cell_num), net):
return net, end_points
true_cell_num += 1
if (hparams.use_aux_head and cell_num in aux_head_cell_idxes and
num_classes and is_training):
aux_net = activation_fn(net)
_build_aux_head(aux_net, end_points, num_classes, hparams,
scope='aux_{}'.format(cell_num))
cell_outputs.append(net)
# Final softmax layer
with tf.variable_scope('final_layer'):
net = activation_fn(net)
net = nasnet_utils.global_avg_pool(net)
if add_and_check_endpoint('global_pool', net) or not num_classes:
return net, end_points
net = slim.dropout(net, hparams.dense_dropout_keep_prob, scope='dropout')
logits = slim.fully_connected(net, num_classes)
if add_and_check_endpoint('Logits', logits):
return net, end_points
predictions = tf.nn.softmax(logits, name='predictions')
if add_and_check_endpoint('Predictions', predictions):
return net, end_points
return logits, end_points
# ==== End of slim/nets/nasnet/nasnet.py ====
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A custom module for some common operations used by NASNet.
Functions exposed in this file:
- calc_reduction_layers
- get_channel_index
- get_channel_dim
- global_avg_pool
- factorized_reduction
- drop_path
Classes exposed in this file:
- NasNetABaseCell
- NasNetANormalCell
- NasNetAReductionCell
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import tf_slim as slim
arg_scope = slim.arg_scope
DATA_FORMAT_NCHW = 'NCHW'
DATA_FORMAT_NHWC = 'NHWC'
INVALID = 'null'
# The cap for tf.clip_by_value; the activation distribution suggests that the
# majority of activation values lie in the range [-6, 6].
CLIP_BY_VALUE_CAP = 6
def calc_reduction_layers(num_cells, num_reduction_layers):
"""Figure out what layers should have reductions."""
reduction_layers = []
for pool_num in range(1, num_reduction_layers + 1):
layer_num = (float(pool_num) / (num_reduction_layers + 1)) * num_cells
layer_num = int(layer_num)
reduction_layers.append(layer_num)
return reduction_layers
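# Example: calc_reduction_layers(12, 2) (the mobile ImageNet setting) places
# the reductions at int((1/3) * 12) = 4 and int((2/3) * 12) = 8, i.e. it
# returns [4, 8].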
@slim.add_arg_scope
def get_channel_index(data_format=INVALID):
assert data_format != INVALID
axis = 3 if data_format == 'NHWC' else 1
return axis
@slim.add_arg_scope
def get_channel_dim(shape, data_format=INVALID):
assert data_format != INVALID
assert len(shape) == 4
if data_format == 'NHWC':
return int(shape[3])
elif data_format == 'NCHW':
return int(shape[1])
else:
raise ValueError('Not a valid data_format', data_format)
@slim.add_arg_scope
def global_avg_pool(x, data_format=INVALID):
"""Average pool away the height and width spatial dimensions of x."""
assert data_format != INVALID
assert data_format in ['NHWC', 'NCHW']
assert x.shape.ndims == 4
if data_format == 'NHWC':
return tf.reduce_mean(input_tensor=x, axis=[1, 2])
else:
return tf.reduce_mean(input_tensor=x, axis=[2, 3])
@slim.add_arg_scope
def factorized_reduction(net, output_filters, stride, data_format=INVALID):
"""Reduces the shape of net without information loss due to striding."""
assert data_format != INVALID
if stride == 1:
net = slim.conv2d(net, output_filters, 1, scope='path_conv')
net = slim.batch_norm(net, scope='path_bn')
return net
if data_format == 'NHWC':
stride_spec = [1, stride, stride, 1]
else:
stride_spec = [1, 1, stride, stride]
# Skip path 1
path1 = tf.nn.avg_pool2d(
net,
ksize=[1, 1, 1, 1],
strides=stride_spec,
padding='VALID',
data_format=data_format)
path1 = slim.conv2d(path1, int(output_filters / 2), 1, scope='path1_conv')
# Skip path 2
# First pad with 0's on the right and bottom, then shift the filter to
# include those 0's that were added.
if data_format == 'NHWC':
pad_arr = [[0, 0], [0, 1], [0, 1], [0, 0]]
path2 = tf.pad(tensor=net, paddings=pad_arr)[:, 1:, 1:, :]
concat_axis = 3
else:
pad_arr = [[0, 0], [0, 0], [0, 1], [0, 1]]
path2 = tf.pad(tensor=net, paddings=pad_arr)[:, :, 1:, 1:]
concat_axis = 1
path2 = tf.nn.avg_pool2d(
path2,
ksize=[1, 1, 1, 1],
strides=stride_spec,
padding='VALID',
data_format=data_format)
# If odd number of filters, add an additional one to the second path.
final_filter_size = int(output_filters / 2) + int(output_filters % 2)
path2 = slim.conv2d(path2, final_filter_size, 1, scope='path2_conv')
# Concat and apply BN
final_path = tf.concat(values=[path1, path2], axis=concat_axis)
final_path = slim.batch_norm(final_path, scope='final_path_bn')
return final_path
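# Shape sketch (NHWC, stride 2): an input of shape [N, H, W, C] comes out as
# [N, ceil(H/2), ceil(W/2), output_filters]; floor(output_filters / 2)
# channels come from the plain strided average-pool path and the remainder
# from the pixel-shifted path, concatenated along the channel axis.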
@slim.add_arg_scope
def drop_path(net, keep_prob, is_training=True):
"""Drops out a whole example hiddenstate with the specified probability."""
if is_training:
batch_size = tf.shape(input=net)[0]
noise_shape = [batch_size, 1, 1, 1]
random_tensor = keep_prob
random_tensor += tf.random.uniform(noise_shape, dtype=tf.float32)
binary_tensor = tf.cast(tf.floor(random_tensor), net.dtype)
keep_prob_inv = tf.cast(1.0 / keep_prob, net.dtype)
net = net * keep_prob_inv * binary_tensor
return net
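# Numerical sketch of the mask above: with keep_prob = 0.7 the tensor
# keep_prob + U[0, 1) lies in [0.7, 1.7), so tf.floor yields 1 with
# probability 0.7 and 0 otherwise. Each example in the batch is therefore
# either kept (and rescaled by 1 / keep_prob) or zeroed out entirely.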
def _operation_to_filter_shape(operation):
splitted_operation = operation.split('x')
filter_shape = int(splitted_operation[0][-1])
assert filter_shape == int(
splitted_operation[1][0]), 'Rectangular filters not supported.'
return filter_shape
def _operation_to_num_layers(operation):
splitted_operation = operation.split('_')
if 'x' in splitted_operation[-1]:
return 1
return int(splitted_operation[-1])
def _operation_to_info(operation):
"""Takes in operation name and returns meta information.
An example would be 'separable_3x3_4' -> (3, 4).
Args:
operation: String that corresponds to convolution operation.
Returns:
Tuple of (filter shape, num layers).
"""
num_layers = _operation_to_num_layers(operation)
filter_shape = _operation_to_filter_shape(operation)
return num_layers, filter_shape
def _stacked_separable_conv(net, stride, operation, filter_size,
use_bounded_activation):
"""Takes in an operations and parses it to the correct sep operation."""
num_layers, kernel_size = _operation_to_info(operation)
activation_fn = tf.nn.relu6 if use_bounded_activation else tf.nn.relu
for layer_num in range(num_layers - 1):
net = activation_fn(net)
net = slim.separable_conv2d(
net,
filter_size,
kernel_size,
depth_multiplier=1,
scope='separable_{0}x{0}_{1}'.format(kernel_size, layer_num + 1),
stride=stride)
net = slim.batch_norm(
net, scope='bn_sep_{0}x{0}_{1}'.format(kernel_size, layer_num + 1))
stride = 1
net = activation_fn(net)
net = slim.separable_conv2d(
net,
filter_size,
kernel_size,
depth_multiplier=1,
scope='separable_{0}x{0}_{1}'.format(kernel_size, num_layers),
stride=stride)
net = slim.batch_norm(
net, scope='bn_sep_{0}x{0}_{1}'.format(kernel_size, num_layers))
return net
def _operation_to_pooling_type(operation):
"""Takes in the operation string and returns the pooling type."""
splitted_operation = operation.split('_')
return splitted_operation[0]
def _operation_to_pooling_shape(operation):
"""Takes in the operation string and returns the pooling kernel shape."""
splitted_operation = operation.split('_')
shape = splitted_operation[-1]
assert 'x' in shape
filter_height, filter_width = shape.split('x')
assert filter_height == filter_width
return int(filter_height)
def _operation_to_pooling_info(operation):
"""Parses the pooling operation string to return its type and shape."""
pooling_type = _operation_to_pooling_type(operation)
pooling_shape = _operation_to_pooling_shape(operation)
return pooling_type, pooling_shape
def _pooling(net, stride, operation, use_bounded_activation):
"""Parses operation and performs the correct pooling operation on net."""
padding = 'SAME'
pooling_type, pooling_shape = _operation_to_pooling_info(operation)
if use_bounded_activation:
net = tf.nn.relu6(net)
if pooling_type == 'avg':
net = slim.avg_pool2d(net, pooling_shape, stride=stride, padding=padding)
elif pooling_type == 'max':
net = slim.max_pool2d(net, pooling_shape, stride=stride, padding=padding)
else:
raise NotImplementedError('Unimplemented pooling type: ', pooling_type)
return net
class NasNetABaseCell(object):
"""NASNet Cell class that is used as a 'layer' in image architectures.
Args:
num_conv_filters: The number of filters for each convolution operation.
operations: List of operations that are performed in the NASNet Cell in
order.
used_hiddenstates: Binary array that signals if the hiddenstate was used
within the cell. This is used to determine what outputs of the cell
should be concatenated together.
hiddenstate_indices: Determines what hiddenstates should be combined
together with the specified operations to create the NASNet cell.
use_bounded_activation: Whether or not to use bounded activations. Bounded
activations better lend themselves to quantized inference.
"""
def __init__(self, num_conv_filters, operations, used_hiddenstates,
hiddenstate_indices, drop_path_keep_prob, total_num_cells,
total_training_steps, use_bounded_activation=False):
self._num_conv_filters = num_conv_filters
self._operations = operations
self._used_hiddenstates = used_hiddenstates
self._hiddenstate_indices = hiddenstate_indices
self._drop_path_keep_prob = drop_path_keep_prob
self._total_num_cells = total_num_cells
self._total_training_steps = total_training_steps
self._use_bounded_activation = use_bounded_activation
def _reduce_prev_layer(self, prev_layer, curr_layer):
"""Matches dimension of prev_layer to the curr_layer."""
# Set the prev layer to the current layer if it is none
if prev_layer is None:
return curr_layer
curr_num_filters = self._filter_size
prev_num_filters = get_channel_dim(prev_layer.shape)
curr_filter_shape = int(curr_layer.shape[2])
prev_filter_shape = int(prev_layer.shape[2])
activation_fn = tf.nn.relu6 if self._use_bounded_activation else tf.nn.relu
if curr_filter_shape != prev_filter_shape:
prev_layer = activation_fn(prev_layer)
prev_layer = factorized_reduction(
prev_layer, curr_num_filters, stride=2)
elif curr_num_filters != prev_num_filters:
prev_layer = activation_fn(prev_layer)
prev_layer = slim.conv2d(
prev_layer, curr_num_filters, 1, scope='prev_1x1')
prev_layer = slim.batch_norm(prev_layer, scope='prev_bn')
return prev_layer
def _cell_base(self, net, prev_layer):
"""Runs the beginning of the conv cell before the predicted ops are run."""
num_filters = self._filter_size
# Check to be sure prev layer stuff is setup correctly
prev_layer = self._reduce_prev_layer(prev_layer, net)
net = tf.nn.relu6(net) if self._use_bounded_activation else tf.nn.relu(net)
net = slim.conv2d(net, num_filters, 1, scope='1x1')
net = slim.batch_norm(net, scope='beginning_bn')
    # Wrap net in a list so additional hidden states can be appended below.
net = [net]
net.append(prev_layer)
return net
def __call__(self, net, scope=None, filter_scaling=1, stride=1,
prev_layer=None, cell_num=-1, current_step=None):
"""Runs the conv cell."""
self._cell_num = cell_num
self._filter_scaling = filter_scaling
self._filter_size = int(self._num_conv_filters * filter_scaling)
i = 0
with tf.variable_scope(scope):
net = self._cell_base(net, prev_layer)
for iteration in range(5):
with tf.variable_scope('comb_iter_{}'.format(iteration)):
left_hiddenstate_idx, right_hiddenstate_idx = (
self._hiddenstate_indices[i],
self._hiddenstate_indices[i + 1])
original_input_left = left_hiddenstate_idx < 2
original_input_right = right_hiddenstate_idx < 2
h1 = net[left_hiddenstate_idx]
h2 = net[right_hiddenstate_idx]
operation_left = self._operations[i]
operation_right = self._operations[i+1]
i += 2
# Apply conv operations
with tf.variable_scope('left'):
h1 = self._apply_conv_operation(h1, operation_left,
stride, original_input_left,
current_step)
with tf.variable_scope('right'):
h2 = self._apply_conv_operation(h2, operation_right,
stride, original_input_right,
current_step)
# Combine hidden states using 'add'.
with tf.variable_scope('combine'):
h = h1 + h2
if self._use_bounded_activation:
h = tf.nn.relu6(h)
# Add hiddenstate to the list of hiddenstates we can choose from
net.append(h)
with tf.variable_scope('cell_output'):
net = self._combine_unused_states(net)
return net
def _apply_conv_operation(self, net, operation,
stride, is_from_original_input, current_step):
"""Applies the predicted conv operation to net."""
    # Don't stride if this is not one of the original hiddenstates
if stride > 1 and not is_from_original_input:
stride = 1
input_filters = get_channel_dim(net.shape)
filter_size = self._filter_size
if 'separable' in operation:
net = _stacked_separable_conv(net, stride, operation, filter_size,
self._use_bounded_activation)
if self._use_bounded_activation:
net = tf.clip_by_value(net, -CLIP_BY_VALUE_CAP, CLIP_BY_VALUE_CAP)
elif operation in ['none']:
if self._use_bounded_activation:
net = tf.nn.relu6(net)
# Check if a stride is needed, then use a strided 1x1 here
if stride > 1 or (input_filters != filter_size):
if not self._use_bounded_activation:
net = tf.nn.relu(net)
net = slim.conv2d(net, filter_size, 1, stride=stride, scope='1x1')
net = slim.batch_norm(net, scope='bn_1')
if self._use_bounded_activation:
net = tf.clip_by_value(net, -CLIP_BY_VALUE_CAP, CLIP_BY_VALUE_CAP)
elif 'pool' in operation:
net = _pooling(net, stride, operation, self._use_bounded_activation)
if input_filters != filter_size:
net = slim.conv2d(net, filter_size, 1, stride=1, scope='1x1')
net = slim.batch_norm(net, scope='bn_1')
if self._use_bounded_activation:
net = tf.clip_by_value(net, -CLIP_BY_VALUE_CAP, CLIP_BY_VALUE_CAP)
else:
raise ValueError('Unimplemented operation', operation)
if operation != 'none':
net = self._apply_drop_path(net, current_step=current_step)
return net
def _combine_unused_states(self, net):
"""Concatenate the unused hidden states of the cell."""
used_hiddenstates = self._used_hiddenstates
final_height = int(net[-1].shape[2])
final_num_filters = get_channel_dim(net[-1].shape)
assert len(used_hiddenstates) == len(net)
for idx, used_h in enumerate(used_hiddenstates):
curr_height = int(net[idx].shape[2])
curr_num_filters = get_channel_dim(net[idx].shape)
# Determine if a reduction should be applied to make the number of
# filters match.
should_reduce = final_num_filters != curr_num_filters
should_reduce = (final_height != curr_height) or should_reduce
should_reduce = should_reduce and not used_h
if should_reduce:
stride = 2 if final_height != curr_height else 1
with tf.variable_scope('reduction_{}'.format(idx)):
net[idx] = factorized_reduction(
net[idx], final_num_filters, stride)
states_to_combine = (
[h for h, is_used in zip(net, used_hiddenstates) if not is_used])
# Return the concat of all the states
concat_axis = get_channel_index()
net = tf.concat(values=states_to_combine, axis=concat_axis)
return net
@slim.add_arg_scope # No public API. For internal use only.
def _apply_drop_path(self, net, current_step=None,
use_summaries=False, drop_connect_version='v3'):
"""Apply drop_path regularization.
Args:
net: the Tensor that gets drop_path regularization applied.
current_step: a float32 Tensor with the current global_step value,
to be divided by hparams.total_training_steps. Usually None, which
        defaults to tf.train.get_or_create_global_step() properly cast.
use_summaries: a Python boolean. If set to False, no summaries are output.
drop_connect_version: one of 'v1', 'v2', 'v3', controlling whether
the dropout rate is scaled by current_step (v1), layer (v2), or
both (v3, the default).
Returns:
The dropped-out value of `net`.
"""
drop_path_keep_prob = self._drop_path_keep_prob
if drop_path_keep_prob < 1.0:
assert drop_connect_version in ['v1', 'v2', 'v3']
if drop_connect_version in ['v2', 'v3']:
# Scale keep prob by layer number
assert self._cell_num != -1
# The added 2 is for the reduction cells
num_cells = self._total_num_cells
layer_ratio = (self._cell_num + 1)/float(num_cells)
if use_summaries:
with tf.device('/cpu:0'):
tf.summary.scalar('layer_ratio', layer_ratio)
drop_path_keep_prob = 1 - layer_ratio * (1 - drop_path_keep_prob)
if drop_connect_version in ['v1', 'v3']:
# Decrease the keep probability over time
if current_step is None:
current_step = tf.train.get_or_create_global_step()
current_step = tf.cast(current_step, tf.float32)
drop_path_burn_in_steps = self._total_training_steps
current_ratio = current_step / drop_path_burn_in_steps
current_ratio = tf.minimum(1.0, current_ratio)
if use_summaries:
with tf.device('/cpu:0'):
tf.summary.scalar('current_ratio', current_ratio)
drop_path_keep_prob = (1 - current_ratio * (1 - drop_path_keep_prob))
if use_summaries:
with tf.device('/cpu:0'):
tf.summary.scalar('drop_path_keep_prob', drop_path_keep_prob)
net = drop_path(net, drop_path_keep_prob)
return net
class NasNetANormalCell(NasNetABaseCell):
"""NASNetA Normal Cell."""
def __init__(self, num_conv_filters, drop_path_keep_prob, total_num_cells,
total_training_steps, use_bounded_activation=False):
operations = ['separable_5x5_2',
'separable_3x3_2',
'separable_5x5_2',
'separable_3x3_2',
'avg_pool_3x3',
'none',
'avg_pool_3x3',
'avg_pool_3x3',
'separable_3x3_2',
'none']
used_hiddenstates = [1, 0, 0, 0, 0, 0, 0]
hiddenstate_indices = [0, 1, 1, 1, 0, 1, 1, 1, 0, 0]
super(NasNetANormalCell, self).__init__(num_conv_filters, operations,
used_hiddenstates,
hiddenstate_indices,
drop_path_keep_prob,
total_num_cells,
total_training_steps,
use_bounded_activation)
class NasNetAReductionCell(NasNetABaseCell):
"""NASNetA Reduction Cell."""
def __init__(self, num_conv_filters, drop_path_keep_prob, total_num_cells,
total_training_steps, use_bounded_activation=False):
operations = ['separable_5x5_2',
'separable_7x7_2',
'max_pool_3x3',
'separable_7x7_2',
'avg_pool_3x3',
'separable_5x5_2',
'none',
'avg_pool_3x3',
'separable_3x3_2',
'max_pool_3x3']
used_hiddenstates = [1, 1, 1, 0, 0, 0, 0]
hiddenstate_indices = [0, 1, 0, 1, 0, 1, 3, 2, 2, 0]
super(NasNetAReductionCell, self).__init__(num_conv_filters, operations,
used_hiddenstates,
hiddenstate_indices,
drop_path_keep_prob,
total_num_cells,
total_training_steps,
use_bounded_activation)
# ==== End of slim/nets/nasnet/nasnet_utils.py ====
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.nasnet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import tf_slim as slim
from nets.nasnet import nasnet
class NASNetTest(tf.test.TestCase):
def testBuildLogitsCifarModel(self):
batch_size = 5
height, width = 32, 32
num_classes = 10
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(nasnet.nasnet_cifar_arg_scope()):
logits, end_points = nasnet.build_nasnet_cifar(inputs, num_classes)
auxlogits = end_points['AuxLogits']
predictions = end_points['Predictions']
self.assertListEqual(auxlogits.get_shape().as_list(),
[batch_size, num_classes])
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertListEqual(predictions.get_shape().as_list(),
[batch_size, num_classes])
def testBuildLogitsMobileModel(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()):
logits, end_points = nasnet.build_nasnet_mobile(inputs, num_classes)
auxlogits = end_points['AuxLogits']
predictions = end_points['Predictions']
self.assertListEqual(auxlogits.get_shape().as_list(),
[batch_size, num_classes])
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertListEqual(predictions.get_shape().as_list(),
[batch_size, num_classes])
def testBuildLogitsLargeModel(self):
batch_size = 5
height, width = 331, 331
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(nasnet.nasnet_large_arg_scope()):
logits, end_points = nasnet.build_nasnet_large(inputs, num_classes)
auxlogits = end_points['AuxLogits']
predictions = end_points['Predictions']
self.assertListEqual(auxlogits.get_shape().as_list(),
[batch_size, num_classes])
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertListEqual(predictions.get_shape().as_list(),
[batch_size, num_classes])
def testBuildPreLogitsCifarModel(self):
batch_size = 5
height, width = 32, 32
num_classes = None
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(nasnet.nasnet_cifar_arg_scope()):
net, end_points = nasnet.build_nasnet_cifar(inputs, num_classes)
self.assertFalse('AuxLogits' in end_points)
self.assertFalse('Predictions' in end_points)
self.assertTrue(net.op.name.startswith('final_layer/Mean'))
self.assertListEqual(net.get_shape().as_list(), [batch_size, 768])
def testBuildPreLogitsMobileModel(self):
batch_size = 5
height, width = 224, 224
num_classes = None
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()):
net, end_points = nasnet.build_nasnet_mobile(inputs, num_classes)
self.assertFalse('AuxLogits' in end_points)
self.assertFalse('Predictions' in end_points)
self.assertTrue(net.op.name.startswith('final_layer/Mean'))
self.assertListEqual(net.get_shape().as_list(), [batch_size, 1056])
def testBuildPreLogitsLargeModel(self):
batch_size = 5
height, width = 331, 331
num_classes = None
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(nasnet.nasnet_large_arg_scope()):
net, end_points = nasnet.build_nasnet_large(inputs, num_classes)
self.assertFalse('AuxLogits' in end_points)
self.assertFalse('Predictions' in end_points)
self.assertTrue(net.op.name.startswith('final_layer/Mean'))
self.assertListEqual(net.get_shape().as_list(), [batch_size, 4032])
def testAllEndPointsShapesCifarModel(self):
batch_size = 5
height, width = 32, 32
num_classes = 10
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(nasnet.nasnet_cifar_arg_scope()):
_, end_points = nasnet.build_nasnet_cifar(inputs, num_classes)
endpoints_shapes = {'Stem': [batch_size, 32, 32, 96],
'Cell_0': [batch_size, 32, 32, 192],
'Cell_1': [batch_size, 32, 32, 192],
'Cell_2': [batch_size, 32, 32, 192],
'Cell_3': [batch_size, 32, 32, 192],
'Cell_4': [batch_size, 32, 32, 192],
'Cell_5': [batch_size, 32, 32, 192],
'Cell_6': [batch_size, 16, 16, 384],
'Cell_7': [batch_size, 16, 16, 384],
'Cell_8': [batch_size, 16, 16, 384],
'Cell_9': [batch_size, 16, 16, 384],
'Cell_10': [batch_size, 16, 16, 384],
'Cell_11': [batch_size, 16, 16, 384],
'Cell_12': [batch_size, 8, 8, 768],
'Cell_13': [batch_size, 8, 8, 768],
'Cell_14': [batch_size, 8, 8, 768],
'Cell_15': [batch_size, 8, 8, 768],
'Cell_16': [batch_size, 8, 8, 768],
'Cell_17': [batch_size, 8, 8, 768],
'Reduction_Cell_0': [batch_size, 16, 16, 256],
'Reduction_Cell_1': [batch_size, 8, 8, 512],
'global_pool': [batch_size, 768],
# Logits and predictions
'AuxLogits': [batch_size, num_classes],
'Logits': [batch_size, num_classes],
'Predictions': [batch_size, num_classes]}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes:
tf.logging.info('Endpoint name: {}'.format(endpoint_name))
expected_shape = endpoints_shapes[endpoint_name]
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testNoAuxHeadCifarModel(self):
batch_size = 5
height, width = 32, 32
num_classes = 10
for use_aux_head in (True, False):
tf.reset_default_graph()
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step()
config = nasnet.cifar_config()
config.set_hparam('use_aux_head', int(use_aux_head))
with slim.arg_scope(nasnet.nasnet_cifar_arg_scope()):
_, end_points = nasnet.build_nasnet_cifar(inputs, num_classes,
config=config)
self.assertEqual('AuxLogits' in end_points, use_aux_head)
def testAllEndPointsShapesMobileModel(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()):
_, end_points = nasnet.build_nasnet_mobile(inputs, num_classes)
endpoints_shapes = {'Stem': [batch_size, 28, 28, 88],
'Cell_0': [batch_size, 28, 28, 264],
'Cell_1': [batch_size, 28, 28, 264],
'Cell_2': [batch_size, 28, 28, 264],
'Cell_3': [batch_size, 28, 28, 264],
'Cell_4': [batch_size, 14, 14, 528],
'Cell_5': [batch_size, 14, 14, 528],
'Cell_6': [batch_size, 14, 14, 528],
'Cell_7': [batch_size, 14, 14, 528],
'Cell_8': [batch_size, 7, 7, 1056],
'Cell_9': [batch_size, 7, 7, 1056],
'Cell_10': [batch_size, 7, 7, 1056],
'Cell_11': [batch_size, 7, 7, 1056],
'Reduction_Cell_0': [batch_size, 14, 14, 352],
'Reduction_Cell_1': [batch_size, 7, 7, 704],
'global_pool': [batch_size, 1056],
# Logits and predictions
'AuxLogits': [batch_size, num_classes],
'Logits': [batch_size, num_classes],
'Predictions': [batch_size, num_classes]}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes:
tf.logging.info('Endpoint name: {}'.format(endpoint_name))
expected_shape = endpoints_shapes[endpoint_name]
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testNoAuxHeadMobileModel(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
for use_aux_head in (True, False):
tf.reset_default_graph()
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step()
config = nasnet.mobile_imagenet_config()
config.set_hparam('use_aux_head', int(use_aux_head))
with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()):
_, end_points = nasnet.build_nasnet_mobile(inputs, num_classes,
config=config)
self.assertEqual('AuxLogits' in end_points, use_aux_head)
def testAllEndPointsShapesLargeModel(self):
batch_size = 5
height, width = 331, 331
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(nasnet.nasnet_large_arg_scope()):
_, end_points = nasnet.build_nasnet_large(inputs, num_classes)
endpoints_shapes = {'Stem': [batch_size, 42, 42, 336],
'Cell_0': [batch_size, 42, 42, 1008],
'Cell_1': [batch_size, 42, 42, 1008],
'Cell_2': [batch_size, 42, 42, 1008],
'Cell_3': [batch_size, 42, 42, 1008],
'Cell_4': [batch_size, 42, 42, 1008],
'Cell_5': [batch_size, 42, 42, 1008],
'Cell_6': [batch_size, 21, 21, 2016],
'Cell_7': [batch_size, 21, 21, 2016],
'Cell_8': [batch_size, 21, 21, 2016],
'Cell_9': [batch_size, 21, 21, 2016],
'Cell_10': [batch_size, 21, 21, 2016],
'Cell_11': [batch_size, 21, 21, 2016],
'Cell_12': [batch_size, 11, 11, 4032],
'Cell_13': [batch_size, 11, 11, 4032],
'Cell_14': [batch_size, 11, 11, 4032],
'Cell_15': [batch_size, 11, 11, 4032],
'Cell_16': [batch_size, 11, 11, 4032],
'Cell_17': [batch_size, 11, 11, 4032],
'Reduction_Cell_0': [batch_size, 21, 21, 1344],
'Reduction_Cell_1': [batch_size, 11, 11, 2688],
'global_pool': [batch_size, 4032],
# Logits and predictions
'AuxLogits': [batch_size, num_classes],
'Logits': [batch_size, num_classes],
'Predictions': [batch_size, num_classes]}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes:
tf.logging.info('Endpoint name: {}'.format(endpoint_name))
expected_shape = endpoints_shapes[endpoint_name]
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testNoAuxHeadLargeModel(self):
batch_size = 5
height, width = 331, 331
num_classes = 1000
for use_aux_head in (True, False):
tf.reset_default_graph()
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step()
config = nasnet.large_imagenet_config()
config.set_hparam('use_aux_head', int(use_aux_head))
with slim.arg_scope(nasnet.nasnet_large_arg_scope()):
_, end_points = nasnet.build_nasnet_large(inputs, num_classes,
config=config)
self.assertEqual('AuxLogits' in end_points, use_aux_head)
def testVariablesSetDeviceMobileModel(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step()
# Force all Variables to reside on the device.
with tf.variable_scope('on_cpu'), tf.device('/cpu:0'):
with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()):
nasnet.build_nasnet_mobile(inputs, num_classes)
with tf.variable_scope('on_gpu'), tf.device('/gpu:0'):
with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()):
nasnet.build_nasnet_mobile(inputs, num_classes)
for v in tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES, scope='on_cpu'):
self.assertDeviceEqual(v.device, '/cpu:0')
for v in tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES, scope='on_gpu'):
self.assertDeviceEqual(v.device, '/gpu:0')
def testUnknownBatchSizeMobileModel(self):
batch_size = 1
height, width = 224, 224
num_classes = 1000
with self.test_session() as sess:
inputs = tf.placeholder(tf.float32, (None, height, width, 3))
with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()):
logits, _ = nasnet.build_nasnet_mobile(inputs, num_classes)
self.assertListEqual(logits.get_shape().as_list(),
[None, num_classes])
images = tf.random.uniform((batch_size, height, width, 3))
sess.run(tf.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
self.assertEquals(output.shape, (batch_size, num_classes))
def testEvaluationMobileModel(self):
batch_size = 2
height, width = 224, 224
num_classes = 1000
with self.test_session() as sess:
eval_inputs = tf.random.uniform((batch_size, height, width, 3))
with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()):
logits, _ = nasnet.build_nasnet_mobile(eval_inputs,
num_classes,
is_training=False)
predictions = tf.argmax(input=logits, axis=1)
sess.run(tf.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (batch_size,))
def testOverrideHParamsCifarModel(self):
batch_size = 5
height, width = 32, 32
num_classes = 10
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step()
config = nasnet.cifar_config()
config.set_hparam('data_format', 'NCHW')
with slim.arg_scope(nasnet.nasnet_cifar_arg_scope()):
_, end_points = nasnet.build_nasnet_cifar(
inputs, num_classes, config=config)
self.assertListEqual(
end_points['Stem'].shape.as_list(), [batch_size, 96, 32, 32])
def testOverrideHParamsMobileModel(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step()
config = nasnet.mobile_imagenet_config()
config.set_hparam('data_format', 'NCHW')
with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()):
_, end_points = nasnet.build_nasnet_mobile(
inputs, num_classes, config=config)
self.assertListEqual(
end_points['Stem'].shape.as_list(), [batch_size, 88, 28, 28])
def testOverrideHParamsLargeModel(self):
batch_size = 5
height, width = 331, 331
num_classes = 1000
inputs = tf.random.uniform((batch_size, height, width, 3))
tf.train.create_global_step()
config = nasnet.large_imagenet_config()
config.set_hparam('data_format', 'NCHW')
with slim.arg_scope(nasnet.nasnet_large_arg_scope()):
_, end_points = nasnet.build_nasnet_large(
inputs, num_classes, config=config)
self.assertListEqual(
end_points['Stem'].shape.as_list(), [batch_size, 336, 42, 42])
def testCurrentStepCifarModel(self):
batch_size = 5
height, width = 32, 32
num_classes = 10
inputs = tf.random.uniform((batch_size, height, width, 3))
global_step = tf.train.create_global_step()
with slim.arg_scope(nasnet.nasnet_cifar_arg_scope()):
logits, end_points = nasnet.build_nasnet_cifar(inputs,
num_classes,
current_step=global_step)
auxlogits = end_points['AuxLogits']
predictions = end_points['Predictions']
self.assertListEqual(auxlogits.get_shape().as_list(),
[batch_size, num_classes])
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertListEqual(predictions.get_shape().as_list(),
[batch_size, num_classes])
  def testUseBoundedActivationCifarModel(self):
batch_size = 1
height, width = 32, 32
num_classes = 10
for use_bounded_activation in (True, False):
tf.reset_default_graph()
inputs = tf.random.uniform((batch_size, height, width, 3))
config = nasnet.cifar_config()
config.set_hparam('use_bounded_activation', use_bounded_activation)
with slim.arg_scope(nasnet.nasnet_cifar_arg_scope()):
_, _ = nasnet.build_nasnet_cifar(
inputs, num_classes, config=config)
for node in tf.get_default_graph().as_graph_def().node:
if node.op.startswith('Relu'):
self.assertEqual(node.op == 'Relu6', use_bounded_activation)
if __name__ == '__main__':
tf.test.main()
# ==== End of slim/nets/nasnet/nasnet_test.py ====
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition for the PNASNet classification networks.
Paper: https://arxiv.org/abs/1712.00559
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import tensorflow.compat.v1 as tf
import tf_slim as slim
from tensorflow.contrib import training as contrib_training
from nets.nasnet import nasnet
from nets.nasnet import nasnet_utils
arg_scope = slim.arg_scope
def large_imagenet_config():
"""Large ImageNet configuration based on PNASNet-5."""
return contrib_training.HParams(
stem_multiplier=3.0,
dense_dropout_keep_prob=0.5,
num_cells=12,
filter_scaling_rate=2.0,
num_conv_filters=216,
drop_path_keep_prob=0.6,
use_aux_head=1,
num_reduction_layers=2,
data_format='NHWC',
skip_reduction_layer_input=1,
total_training_steps=250000,
use_bounded_activation=False,
)
def mobile_imagenet_config():
"""Mobile ImageNet configuration based on PNASNet-5."""
return contrib_training.HParams(
stem_multiplier=1.0,
dense_dropout_keep_prob=0.5,
num_cells=9,
filter_scaling_rate=2.0,
num_conv_filters=54,
drop_path_keep_prob=1.0,
use_aux_head=1,
num_reduction_layers=2,
data_format='NHWC',
skip_reduction_layer_input=1,
total_training_steps=250000,
use_bounded_activation=False,
)
def pnasnet_large_arg_scope(weight_decay=4e-5, batch_norm_decay=0.9997,
batch_norm_epsilon=0.001):
"""Default arg scope for the PNASNet Large ImageNet model."""
return nasnet.nasnet_large_arg_scope(
weight_decay, batch_norm_decay, batch_norm_epsilon)
def pnasnet_mobile_arg_scope(weight_decay=4e-5,
batch_norm_decay=0.9997,
batch_norm_epsilon=0.001):
"""Default arg scope for the PNASNet Mobile ImageNet model."""
return nasnet.nasnet_mobile_arg_scope(weight_decay, batch_norm_decay,
batch_norm_epsilon)
def _build_pnasnet_base(images,
normal_cell,
num_classes,
hparams,
is_training,
final_endpoint=None):
"""Constructs a PNASNet image model."""
end_points = {}
def add_and_check_endpoint(endpoint_name, net):
end_points[endpoint_name] = net
return final_endpoint and (endpoint_name == final_endpoint)
# Find where to place the reduction cells or stride normal cells
reduction_indices = nasnet_utils.calc_reduction_layers(
hparams.num_cells, hparams.num_reduction_layers)
# pylint: disable=protected-access
stem = lambda: nasnet._imagenet_stem(images, hparams, normal_cell)
# pylint: enable=protected-access
net, cell_outputs = stem()
if add_and_check_endpoint('Stem', net):
return net, end_points
# Setup for building in the auxiliary head.
aux_head_cell_idxes = []
if len(reduction_indices) >= 2:
aux_head_cell_idxes.append(reduction_indices[1] - 1)
# Run the cells
filter_scaling = 1.0
# true_cell_num accounts for the stem cells
true_cell_num = 2
activation_fn = tf.nn.relu6 if hparams.use_bounded_activation else tf.nn.relu
for cell_num in range(hparams.num_cells):
is_reduction = cell_num in reduction_indices
stride = 2 if is_reduction else 1
if is_reduction: filter_scaling *= hparams.filter_scaling_rate
if hparams.skip_reduction_layer_input or not is_reduction:
prev_layer = cell_outputs[-2]
net = normal_cell(
net,
scope='cell_{}'.format(cell_num),
filter_scaling=filter_scaling,
stride=stride,
prev_layer=prev_layer,
cell_num=true_cell_num)
if add_and_check_endpoint('Cell_{}'.format(cell_num), net):
return net, end_points
true_cell_num += 1
cell_outputs.append(net)
if (hparams.use_aux_head and cell_num in aux_head_cell_idxes and
num_classes and is_training):
aux_net = activation_fn(net)
# pylint: disable=protected-access
nasnet._build_aux_head(aux_net, end_points, num_classes, hparams,
scope='aux_{}'.format(cell_num))
# pylint: enable=protected-access
# Final softmax layer
with tf.variable_scope('final_layer'):
net = activation_fn(net)
net = nasnet_utils.global_avg_pool(net)
if add_and_check_endpoint('global_pool', net) or not num_classes:
return net, end_points
net = slim.dropout(net, hparams.dense_dropout_keep_prob, scope='dropout')
logits = slim.fully_connected(net, num_classes)
if add_and_check_endpoint('Logits', logits):
return net, end_points
predictions = tf.nn.softmax(logits, name='predictions')
if add_and_check_endpoint('Predictions', predictions):
return net, end_points
return logits, end_points
def build_pnasnet_large(images,
num_classes,
is_training=True,
final_endpoint=None,
config=None):
"""Build PNASNet Large model for the ImageNet Dataset."""
hparams = copy.deepcopy(config) if config else large_imagenet_config()
# pylint: disable=protected-access
nasnet._update_hparams(hparams, is_training)
# pylint: enable=protected-access
if tf.test.is_gpu_available() and hparams.data_format == 'NHWC':
tf.logging.info(
'A GPU is available on the machine, consider using NCHW '
'data format for increased speed on GPU.')
if hparams.data_format == 'NCHW':
images = tf.transpose(a=images, perm=[0, 3, 1, 2])
# Calculate the total number of cells in the network.
  # There is no distinction between reduction and normal cells in PNAS, so the
  # total number of cells is equal to the number of normal cells plus the
  # number of stem cells (two by default).
total_num_cells = hparams.num_cells + 2
normal_cell = PNasNetNormalCell(hparams.num_conv_filters,
hparams.drop_path_keep_prob, total_num_cells,
hparams.total_training_steps,
hparams.use_bounded_activation)
with arg_scope(
[slim.dropout, nasnet_utils.drop_path, slim.batch_norm],
is_training=is_training):
with arg_scope([slim.avg_pool2d, slim.max_pool2d, slim.conv2d,
slim.batch_norm, slim.separable_conv2d,
nasnet_utils.factorized_reduction,
nasnet_utils.global_avg_pool,
nasnet_utils.get_channel_index,
nasnet_utils.get_channel_dim],
data_format=hparams.data_format):
return _build_pnasnet_base(
images,
normal_cell=normal_cell,
num_classes=num_classes,
hparams=hparams,
is_training=is_training,
final_endpoint=final_endpoint)
build_pnasnet_large.default_image_size = 331
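# Illustrative usage sketch (not part of the upstream module): the builder
# above can be called directly on an image batch in TF1-style graph mode, as
# used throughout this package. The placeholder shape and the 1001-class count
# (ImageNet plus background) are assumptions for the example only.
def _example_build_pnasnet_large():
  """Builds PNASNet-Large on a dummy batch; returns (logits, end_points)."""
  image_size = build_pnasnet_large.default_image_size  # 331
  images = tf.placeholder(tf.float32, [None, image_size, image_size, 3])
  return build_pnasnet_large(images, num_classes=1001, is_training=False)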
def build_pnasnet_mobile(images,
num_classes,
is_training=True,
final_endpoint=None,
config=None):
"""Build PNASNet Mobile model for the ImageNet Dataset."""
hparams = copy.deepcopy(config) if config else mobile_imagenet_config()
# pylint: disable=protected-access
nasnet._update_hparams(hparams, is_training)
# pylint: enable=protected-access
if tf.test.is_gpu_available() and hparams.data_format == 'NHWC':
tf.logging.info(
'A GPU is available on the machine, consider using NCHW '
'data format for increased speed on GPU.')
if hparams.data_format == 'NCHW':
images = tf.transpose(a=images, perm=[0, 3, 1, 2])
# Calculate the total number of cells in the network.
# There is no distinction between reduction and normal cells in PNAS so the
 # total number of cells is equal to the number of normal cells plus the number
# of stem cells (two by default).
total_num_cells = hparams.num_cells + 2
normal_cell = PNasNetNormalCell(hparams.num_conv_filters,
hparams.drop_path_keep_prob, total_num_cells,
hparams.total_training_steps,
hparams.use_bounded_activation)
with arg_scope(
[slim.dropout, nasnet_utils.drop_path, slim.batch_norm],
is_training=is_training):
with arg_scope(
[
slim.avg_pool2d, slim.max_pool2d, slim.conv2d, slim.batch_norm,
slim.separable_conv2d, nasnet_utils.factorized_reduction,
nasnet_utils.global_avg_pool, nasnet_utils.get_channel_index,
nasnet_utils.get_channel_dim
],
data_format=hparams.data_format):
return _build_pnasnet_base(
images,
normal_cell=normal_cell,
num_classes=num_classes,
hparams=hparams,
is_training=is_training,
final_endpoint=final_endpoint)
build_pnasnet_mobile.default_image_size = 224
class PNasNetNormalCell(nasnet_utils.NasNetABaseCell):
"""PNASNet Normal Cell."""
def __init__(self, num_conv_filters, drop_path_keep_prob, total_num_cells,
total_training_steps, use_bounded_activation=False):
# Configuration for the PNASNet-5 model.
operations = [
'separable_5x5_2', 'max_pool_3x3', 'separable_7x7_2', 'max_pool_3x3',
'separable_5x5_2', 'separable_3x3_2', 'separable_3x3_2', 'max_pool_3x3',
'separable_3x3_2', 'none'
]
used_hiddenstates = [1, 1, 0, 0, 0, 0, 0]
hiddenstate_indices = [1, 1, 0, 0, 0, 0, 4, 0, 1, 0]
super(PNasNetNormalCell, self).__init__(
num_conv_filters, operations, used_hiddenstates, hiddenstate_indices,
drop_path_keep_prob, total_num_cells, total_training_steps,
use_bounded_activation)
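# Reading the specification above (a hedged sketch; the interpretation lives in
# nasnet_utils.NasNetABaseCell): consecutive pairs in `operations` form five
# combinations, `hiddenstate_indices` names the previous hidden state each of
# the ten operations consumes, and `used_hiddenstates` flags which of the seven
# hidden states (two cell inputs plus five combination outputs) were consumed
# and are therefore excluded from the concatenation that forms the cell output.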
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/slim/nets/nasnet/pnasnet.py | pnasnet.py |
123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/slim/nets/nasnet/__init__.py | __init__.py |
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.nets.nasnet.nasnet_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from nets.nasnet import nasnet_utils
class NasnetUtilsTest(tf.test.TestCase):
def testCalcReductionLayers(self):
num_cells = 18
num_reduction_layers = 2
reduction_layers = nasnet_utils.calc_reduction_layers(
num_cells, num_reduction_layers)
self.assertEqual(len(reduction_layers), 2)
self.assertEqual(reduction_layers[0], 6)
self.assertEqual(reduction_layers[1], 12)
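  # Worked arithmetic for the expected values above (illustrative only): the
  # reduction layers are spread evenly through the network, so with 18 cells
  # and 2 reductions they land at int(18 * 1 / 3) = 6 and int(18 * 2 / 3) = 12.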
def testGetChannelIndex(self):
data_formats = ['NHWC', 'NCHW']
for data_format in data_formats:
index = nasnet_utils.get_channel_index(data_format)
correct_index = 3 if data_format == 'NHWC' else 1
self.assertEqual(index, correct_index)
def testGetChannelDim(self):
data_formats = ['NHWC', 'NCHW']
shape = [10, 20, 30, 40]
for data_format in data_formats:
dim = nasnet_utils.get_channel_dim(shape, data_format)
correct_dim = shape[3] if data_format == 'NHWC' else shape[1]
self.assertEqual(dim, correct_dim)
def testGlobalAvgPool(self):
data_formats = ['NHWC', 'NCHW']
inputs = tf.placeholder(tf.float32, (5, 10, 20, 10))
for data_format in data_formats:
output = nasnet_utils.global_avg_pool(
inputs, data_format)
self.assertEqual(output.shape, [5, 10])
def test_factorized_reduction(self):
data_format = 'NHWC'
output_shape = (5, 10, 20, 16)
inputs = tf.placeholder(tf.float32, (5, 10, 20, 10))
output = nasnet_utils.factorized_reduction(
inputs, 16, stride=1, data_format=data_format)
self.assertSequenceEqual(output_shape, output.shape.as_list())
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/slim/nets/nasnet/nasnet_utils_test.py | nasnet_utils_test.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides utilities to preprocess images in CIFAR-10.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
_PADDING = 4
def preprocess_for_train(image,
output_height,
output_width,
padding=_PADDING,
add_image_summaries=True,
use_grayscale=False):
"""Preprocesses the given image for training.
 The image is padded, randomly cropped to the output size, randomly flipped
 left-right, color-distorted and standardized per image.
Args:
image: A `Tensor` representing an image of arbitrary size.
output_height: The height of the image after preprocessing.
output_width: The width of the image after preprocessing.
 padding: The amount of padding before and after each dimension of the image.
add_image_summaries: Enable image summaries.
use_grayscale: Whether to convert the image from RGB to grayscale.
Returns:
A preprocessed image.
"""
if add_image_summaries:
tf.summary.image('image', tf.expand_dims(image, 0))
# Transform the image to floats.
image = tf.to_float(image)
if use_grayscale:
image = tf.image.rgb_to_grayscale(image)
if padding > 0:
image = tf.pad(image, [[padding, padding], [padding, padding], [0, 0]])
# Randomly crop a [height, width] section of the image.
distorted_image = tf.random_crop(image,
[output_height, output_width, 3])
# Randomly flip the image horizontally.
distorted_image = tf.image.random_flip_left_right(distorted_image)
if add_image_summaries:
tf.summary.image('distorted_image', tf.expand_dims(distorted_image, 0))
# Because these operations are not commutative, consider randomizing
 # the order of their operations.
distorted_image = tf.image.random_brightness(distorted_image,
max_delta=63)
distorted_image = tf.image.random_contrast(distorted_image,
lower=0.2, upper=1.8)
# Subtract off the mean and divide by the variance of the pixels.
return tf.image.per_image_standardization(distorted_image)
def preprocess_for_eval(image,
output_height,
output_width,
add_image_summaries=True,
use_grayscale=False):
"""Preprocesses the given image for evaluation.
Args:
image: A `Tensor` representing an image of arbitrary size.
output_height: The height of the image after preprocessing.
output_width: The width of the image after preprocessing.
add_image_summaries: Enable image summaries.
use_grayscale: Whether to convert the image from RGB to grayscale.
Returns:
A preprocessed image.
"""
if add_image_summaries:
tf.summary.image('image', tf.expand_dims(image, 0))
# Transform the image to floats.
image = tf.to_float(image)
if use_grayscale:
image = tf.image.rgb_to_grayscale(image)
# Resize and crop if needed.
resized_image = tf.image.resize_image_with_crop_or_pad(image,
output_width,
output_height)
if add_image_summaries:
tf.summary.image('resized_image', tf.expand_dims(resized_image, 0))
# Subtract off the mean and divide by the variance of the pixels.
return tf.image.per_image_standardization(resized_image)
def preprocess_image(image,
output_height,
output_width,
is_training=False,
add_image_summaries=True,
use_grayscale=False):
"""Preprocesses the given image.
Args:
image: A `Tensor` representing an image of arbitrary size.
output_height: The height of the image after preprocessing.
output_width: The width of the image after preprocessing.
is_training: `True` if we're preprocessing the image for training and
`False` otherwise.
add_image_summaries: Enable image summaries.
use_grayscale: Whether to convert the image from RGB to grayscale.
Returns:
A preprocessed image.
"""
if is_training:
return preprocess_for_train(
image,
output_height,
output_width,
add_image_summaries=add_image_summaries,
use_grayscale=use_grayscale)
else:
return preprocess_for_eval(
image,
output_height,
output_width,
add_image_summaries=add_image_summaries,
use_grayscale=use_grayscale)
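# Illustrative usage sketch (not part of the upstream module): preprocess a
# decoded CIFAR-10 image for training and evaluation. The uint8 placeholder
# and the 32x32 output size are assumptions for the example.
def _example_cifarnet_preprocessing():
  """Returns (train_image, eval_image) built from a dummy uint8 input."""
  raw_image = tf.placeholder(tf.uint8, [32, 32, 3])
  train_image = preprocess_image(raw_image, 32, 32, is_training=True,
                                 add_image_summaries=False)
  eval_image = preprocess_image(raw_image, 32, 32, is_training=False,
                                add_image_summaries=False)
  return train_image, eval_image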
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/slim/preprocessing/cifarnet_preprocessing.py | cifarnet_preprocessing.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains a factory for building various models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from preprocessing import cifarnet_preprocessing
from preprocessing import inception_preprocessing
from preprocessing import lenet_preprocessing
from preprocessing import vgg_preprocessing
def get_preprocessing(name, is_training=False, use_grayscale=False):
"""Returns preprocessing_fn(image, height, width, **kwargs).
Args:
name: The name of the preprocessing function.
is_training: `True` if the model is being used for training and `False`
otherwise.
use_grayscale: Whether to convert the image from RGB to grayscale.
Returns:
 preprocessing_fn: A function that preprocesses a single image (pre-batch).
It has the following signature:
image = preprocessing_fn(image, output_height, output_width, ...).
Raises:
ValueError: If Preprocessing `name` is not recognized.
"""
preprocessing_fn_map = {
'cifarnet': cifarnet_preprocessing,
'inception': inception_preprocessing,
'inception_v1': inception_preprocessing,
'inception_v2': inception_preprocessing,
'inception_v3': inception_preprocessing,
'inception_v4': inception_preprocessing,
'inception_resnet_v2': inception_preprocessing,
'lenet': lenet_preprocessing,
'mobilenet_v1': inception_preprocessing,
'mobilenet_v2': inception_preprocessing,
'mobilenet_v2_035': inception_preprocessing,
'mobilenet_v3_small': inception_preprocessing,
'mobilenet_v3_large': inception_preprocessing,
'mobilenet_v3_small_minimalistic': inception_preprocessing,
'mobilenet_v3_large_minimalistic': inception_preprocessing,
'mobilenet_edgetpu': inception_preprocessing,
'mobilenet_edgetpu_075': inception_preprocessing,
'mobilenet_v2_140': inception_preprocessing,
'nasnet_mobile': inception_preprocessing,
'nasnet_large': inception_preprocessing,
'pnasnet_mobile': inception_preprocessing,
'pnasnet_large': inception_preprocessing,
'resnet_v1_50': vgg_preprocessing,
'resnet_v1_101': vgg_preprocessing,
'resnet_v1_152': vgg_preprocessing,
'resnet_v1_200': vgg_preprocessing,
'resnet_v2_50': vgg_preprocessing,
'resnet_v2_101': vgg_preprocessing,
'resnet_v2_152': vgg_preprocessing,
'resnet_v2_200': vgg_preprocessing,
'vgg': vgg_preprocessing,
'vgg_a': vgg_preprocessing,
'vgg_16': vgg_preprocessing,
'vgg_19': vgg_preprocessing,
}
if name not in preprocessing_fn_map:
raise ValueError('Preprocessing name [%s] was not recognized' % name)
def preprocessing_fn(image, output_height, output_width, **kwargs):
return preprocessing_fn_map[name].preprocess_image(
image,
output_height,
output_width,
is_training=is_training,
use_grayscale=use_grayscale,
**kwargs)
return preprocessing_fn
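# Illustrative usage sketch (not part of the upstream module): look up the
# preprocessing for a model by name and apply it to a decoded image tensor.
# The model name, the `image` argument and the 224x224 output size are
# assumptions for the example only.
def _example_preprocessing_factory(image):
  """Applies the Inception-style preprocessing registered for mobilenet_v1."""
  preprocessing_fn = get_preprocessing('mobilenet_v1', is_training=False)
  return preprocessing_fn(image, 224, 224)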
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/slim/preprocessing/preprocessing_factory.py | preprocessing_factory.py |
123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/slim/preprocessing/__init__.py | __init__.py |
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides utilities for preprocessing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
def preprocess_image(image,
output_height,
output_width,
is_training,
use_grayscale=False):
"""Preprocesses the given image.
Args:
image: A `Tensor` representing an image of arbitrary size.
output_height: The height of the image after preprocessing.
output_width: The width of the image after preprocessing.
is_training: `True` if we're preprocessing the image for training and
`False` otherwise.
use_grayscale: Whether to convert the image from RGB to grayscale.
Returns:
A preprocessed image.
"""
del is_training # Unused argument
image = tf.to_float(image)
if use_grayscale:
image = tf.image.rgb_to_grayscale(image)
image = tf.image.resize_image_with_crop_or_pad(
image, output_width, output_height)
image = tf.subtract(image, 128.0)
image = tf.div(image, 128.0)
return image
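# Scaling note (a sketch of the arithmetic above, not upstream documentation):
# subtracting 128 and dividing by 128 maps uint8 pixel values from [0, 255]
# to roughly [-1, 1): (0 - 128) / 128 = -1.0 and (255 - 128) / 128 ~= 0.992.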
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/slim/preprocessing/lenet_preprocessing.py | lenet_preprocessing.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides utilities to preprocess images for the Inception networks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.python.ops import control_flow_ops
def apply_with_random_selector(x, func, num_cases):
"""Computes func(x, sel), with sel sampled from [0...num_cases-1].
Args:
x: input Tensor.
func: Python function to apply.
num_cases: Python int32, number of cases to sample sel from.
Returns:
The result of func(x, sel), where func receives the value of the
selector as a python integer, but sel is sampled dynamically.
"""
sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)
# Pass the real x only to one of the func calls.
return control_flow_ops.merge([
func(control_flow_ops.switch(x, tf.equal(sel, case))[1], case)
for case in range(num_cases)])[0]
def distort_color(image, color_ordering=0, fast_mode=True, scope=None):
"""Distort the color of a Tensor image.
Each color distortion is non-commutative and thus ordering of the color ops
matters. Ideally we would randomly permute the ordering of the color ops.
 Rather than adding that level of complication, we select a distinct ordering
of color ops for each preprocessing thread.
Args:
image: 3-D Tensor containing single image in [0, 1].
color_ordering: Python int, a type of distortion (valid values: 0-3).
fast_mode: Avoids slower ops (random_hue and random_contrast)
scope: Optional scope for name_scope.
Returns:
3-D Tensor color-distorted image on range [0, 1]
Raises:
ValueError: if color_ordering not in [0, 3]
"""
with tf.name_scope(scope, 'distort_color', [image]):
if fast_mode:
if color_ordering == 0:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
else:
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
else:
if color_ordering == 0:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
elif color_ordering == 1:
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
elif color_ordering == 2:
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
elif color_ordering == 3:
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
else:
raise ValueError('color_ordering must be in [0, 3]')
# The random_* ops do not necessarily clamp.
return tf.clip_by_value(image, 0.0, 1.0)
def distorted_bounding_box_crop(image,
bbox,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0),
max_attempts=100,
scope=None):
"""Generates cropped_image using a one of the bboxes randomly distorted.
See `tf.image.sample_distorted_bounding_box` for more documentation.
Args:
image: 3-D Tensor of image (it will be converted to floats in [0, 1]).
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged
as [ymin, xmin, ymax, xmax]. If num_boxes is 0 then it would use the whole
image.
min_object_covered: An optional `float`. Defaults to `0.1`. The cropped
area of the image must contain at least this fraction of any bounding box
supplied.
aspect_ratio_range: An optional list of `floats`. The cropped area of the
image must have an aspect ratio = width / height within this range.
area_range: An optional list of `floats`. The cropped area of the image
 must contain a fraction of the supplied image within this range.
max_attempts: An optional `int`. Number of attempts at generating a cropped
region of the image of the specified constraints. After `max_attempts`
failures, return the entire image.
scope: Optional scope for name_scope.
Returns:
A tuple, a 3-D Tensor cropped_image and the distorted bbox
"""
with tf.name_scope(scope, 'distorted_bounding_box_crop', [image, bbox]):
# Each bounding box has shape [1, num_boxes, box coords] and
# the coordinates are ordered [ymin, xmin, ymax, xmax].
# A large fraction of image datasets contain a human-annotated bounding
# box delineating the region of the image containing the object of interest.
# We choose to create a new bounding box for the object which is a randomly
# distorted version of the human-annotated bounding box that obeys an
# allowed range of aspect ratios, sizes and overlap with the human-annotated
# bounding box. If no box is supplied, then we assume the bounding box is
# the entire image.
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
tf.shape(image),
bounding_boxes=bbox,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=max_attempts,
use_image_if_no_bounding_boxes=True)
bbox_begin, bbox_size, distort_bbox = sample_distorted_bounding_box
# Crop the image to the specified bounding box.
cropped_image = tf.slice(image, bbox_begin, bbox_size)
return cropped_image, distort_bbox
def preprocess_for_train(image,
height,
width,
bbox,
fast_mode=True,
scope=None,
add_image_summaries=True,
random_crop=True,
use_grayscale=False):
"""Distort one image for training a network.
Distorting images provides a useful technique for augmenting the data
set during training in order to make the network invariant to aspects
 of the image that do not affect the label.
Additionally it would create image_summaries to display the different
transformations applied to the image.
Args:
image: 3-D Tensor of image. If dtype is tf.float32 then the range should be
 [0, 1], otherwise it would be converted to tf.float32 assuming that the range
is [0, MAX], where MAX is largest positive representable number for
int(8/16/32) data type (see `tf.image.convert_image_dtype` for details).
height: integer
width: integer
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged
as [ymin, xmin, ymax, xmax].
fast_mode: Optional boolean, if True avoids slower transformations (i.e.
bi-cubic resizing, random_hue or random_contrast).
scope: Optional scope for name_scope.
add_image_summaries: Enable image summaries.
random_crop: Enable random cropping of images during preprocessing for
training.
use_grayscale: Whether to convert the image from RGB to grayscale.
Returns:
3-D float Tensor of distorted image used for training with range [-1, 1].
"""
with tf.name_scope(scope, 'distort_image', [image, height, width, bbox]):
if bbox is None:
bbox = tf.constant([0.0, 0.0, 1.0, 1.0],
dtype=tf.float32,
shape=[1, 1, 4])
if image.dtype != tf.float32:
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
# Each bounding box has shape [1, num_boxes, box coords] and
# the coordinates are ordered [ymin, xmin, ymax, xmax].
image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
bbox)
if add_image_summaries:
tf.summary.image('image_with_bounding_boxes', image_with_box)
if not random_crop:
distorted_image = image
else:
distorted_image, distorted_bbox = distorted_bounding_box_crop(image, bbox)
# Restore the shape since the dynamic slice based upon the bbox_size loses
# the third dimension.
distorted_image.set_shape([None, None, 3])
image_with_distorted_box = tf.image.draw_bounding_boxes(
tf.expand_dims(image, 0), distorted_bbox)
if add_image_summaries:
tf.summary.image('images_with_distorted_bounding_box',
image_with_distorted_box)
# This resizing operation may distort the images because the aspect
 # ratio is not respected. We select a resize method at random for each
 # image via apply_with_random_selector.
# Note that ResizeMethod contains 4 enumerated resizing methods.
# We select only 1 case for fast_mode bilinear.
num_resize_cases = 1 if fast_mode else 4
distorted_image = apply_with_random_selector(
distorted_image,
lambda x, method: tf.image.resize_images(x, [height, width], method),
num_cases=num_resize_cases)
if add_image_summaries:
tf.summary.image(('cropped_' if random_crop else '') + 'resized_image',
tf.expand_dims(distorted_image, 0))
# Randomly flip the image horizontally.
distorted_image = tf.image.random_flip_left_right(distorted_image)
# Randomly distort the colors. There are 1 or 4 ways to do it.
num_distort_cases = 1 if fast_mode else 4
distorted_image = apply_with_random_selector(
distorted_image,
lambda x, ordering: distort_color(x, ordering, fast_mode),
num_cases=num_distort_cases)
if use_grayscale:
distorted_image = tf.image.rgb_to_grayscale(distorted_image)
if add_image_summaries:
tf.summary.image('final_distorted_image',
tf.expand_dims(distorted_image, 0))
distorted_image = tf.subtract(distorted_image, 0.5)
distorted_image = tf.multiply(distorted_image, 2.0)
return distorted_image
def preprocess_for_eval(image,
height,
width,
central_fraction=0.875,
scope=None,
central_crop=True,
use_grayscale=False):
"""Prepare one image for evaluation.
If height and width are specified it would output an image with that size by
applying resize_bilinear.
If central_fraction is specified it would crop the central fraction of the
input image.
Args:
image: 3-D Tensor of image. If dtype is tf.float32 then the range should be
 [0, 1], otherwise it would be converted to tf.float32 assuming that the range
is [0, MAX], where MAX is largest positive representable number for
int(8/16/32) data type (see `tf.image.convert_image_dtype` for details).
height: integer
width: integer
central_fraction: Optional Float, fraction of the image to crop.
scope: Optional scope for name_scope.
central_crop: Enable central cropping of images during preprocessing for
evaluation.
use_grayscale: Whether to convert the image from RGB to grayscale.
Returns:
3-D float Tensor of prepared image.
"""
with tf.name_scope(scope, 'eval_image', [image, height, width]):
if image.dtype != tf.float32:
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
if use_grayscale:
image = tf.image.rgb_to_grayscale(image)
# Crop the central region of the image with an area containing 87.5% of
# the original image.
if central_crop and central_fraction:
image = tf.image.central_crop(image, central_fraction=central_fraction)
if height and width:
# Resize the image to the specified height and width.
image = tf.expand_dims(image, 0)
image = tf.image.resize_bilinear(image, [height, width],
align_corners=False)
image = tf.squeeze(image, [0])
image = tf.subtract(image, 0.5)
image = tf.multiply(image, 2.0)
return image
def preprocess_image(image,
height,
width,
is_training=False,
bbox=None,
fast_mode=True,
add_image_summaries=True,
crop_image=True,
use_grayscale=False):
"""Pre-process one image for training or evaluation.
Args:
image: 3-D Tensor [height, width, channels] with the image. If dtype is
 tf.float32 then the range should be [0, 1], otherwise it would be converted
to tf.float32 assuming that the range is [0, MAX], where MAX is largest
positive representable number for int(8/16/32) data type (see
`tf.image.convert_image_dtype` for details).
height: integer, image expected height.
width: integer, image expected width.
is_training: Boolean. If true it would transform an image for train,
otherwise it would transform it for evaluation.
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged as
[ymin, xmin, ymax, xmax].
fast_mode: Optional boolean, if True avoids slower transformations.
add_image_summaries: Enable image summaries.
crop_image: Whether to enable cropping of images during preprocessing for
both training and evaluation.
use_grayscale: Whether to convert the image from RGB to grayscale.
Returns:
3-D float Tensor containing an appropriately scaled image
Raises:
ValueError: if user does not provide bounding box
"""
if is_training:
return preprocess_for_train(
image,
height,
width,
bbox,
fast_mode,
add_image_summaries=add_image_summaries,
random_crop=crop_image,
use_grayscale=use_grayscale)
else:
return preprocess_for_eval(
image,
height,
width,
central_crop=crop_image,
use_grayscale=use_grayscale)
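# Illustrative usage sketch (not part of the upstream module): evaluation-time
# preprocessing at the 299x299 size commonly used with Inception v3. The
# `image` argument and the output size are assumptions for the example.
def _example_inception_eval_preprocessing(image):
  """Central-crops, resizes to 299x299 and rescales the image to [-1, 1]."""
  return preprocess_image(image, 299, 299, is_training=False)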
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/slim/preprocessing/inception_preprocessing.py | inception_preprocessing.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides utilities to preprocess images.
The preprocessing steps for VGG were introduced in the following technical
report:
Very Deep Convolutional Networks For Large-Scale Image Recognition
Karen Simonyan and Andrew Zisserman
arXiv technical report, 2015
PDF: http://arxiv.org/pdf/1409.1556.pdf
ILSVRC 2014 Slides: http://www.robots.ox.ac.uk/~karen/pdf/ILSVRC_2014.pdf
CC-BY-4.0
More information can be obtained from the VGG website:
www.robots.ox.ac.uk/~vgg/research/very_deep/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
_R_MEAN = 123.68
_G_MEAN = 116.78
_B_MEAN = 103.94
_RESIZE_SIDE_MIN = 256
_RESIZE_SIDE_MAX = 512
def _crop(image, offset_height, offset_width, crop_height, crop_width):
"""Crops the given image using the provided offsets and sizes.
Note that the method doesn't assume we know the input image size but it does
assume we know the input image rank.
Args:
image: an image of shape [height, width, channels].
offset_height: a scalar tensor indicating the height offset.
offset_width: a scalar tensor indicating the width offset.
crop_height: the height of the cropped image.
crop_width: the width of the cropped image.
Returns:
the cropped (and resized) image.
Raises:
InvalidArgumentError: if the rank is not 3 or if the image dimensions are
less than the crop size.
"""
original_shape = tf.shape(image)
rank_assertion = tf.Assert(
tf.equal(tf.rank(image), 3),
['Rank of image must be equal to 3.'])
with tf.control_dependencies([rank_assertion]):
cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])
size_assertion = tf.Assert(
tf.logical_and(
tf.greater_equal(original_shape[0], crop_height),
tf.greater_equal(original_shape[1], crop_width)),
['Crop size greater than the image size.'])
offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))
# Use tf.slice instead of crop_to_bounding box as it accepts tensors to
# define the crop size.
with tf.control_dependencies([size_assertion]):
image = tf.slice(image, offsets, cropped_shape)
return tf.reshape(image, cropped_shape)
def _random_crop(image_list, crop_height, crop_width):
"""Crops the given list of images.
The function applies the same crop to each image in the list. This can be
effectively applied when there are multiple image inputs of the same
dimension such as:
image, depths, normals = _random_crop([image, depths, normals], 120, 150)
Args:
image_list: a list of image tensors of the same dimension but possibly
varying channel.
crop_height: the new height.
crop_width: the new width.
Returns:
the image_list with cropped images.
Raises:
ValueError: if there are multiple image inputs provided with different size
or the images are smaller than the crop dimensions.
"""
if not image_list:
raise ValueError('Empty image_list.')
# Compute the rank assertions.
rank_assertions = []
for i in range(len(image_list)):
image_rank = tf.rank(image_list[i])
rank_assert = tf.Assert(
tf.equal(image_rank, 3),
['Wrong rank for tensor %s [expected] [actual]',
image_list[i].name, 3, image_rank])
rank_assertions.append(rank_assert)
with tf.control_dependencies([rank_assertions[0]]):
image_shape = tf.shape(image_list[0])
image_height = image_shape[0]
image_width = image_shape[1]
crop_size_assert = tf.Assert(
tf.logical_and(
tf.greater_equal(image_height, crop_height),
tf.greater_equal(image_width, crop_width)),
['Crop size greater than the image size.'])
asserts = [rank_assertions[0], crop_size_assert]
for i in range(1, len(image_list)):
image = image_list[i]
asserts.append(rank_assertions[i])
with tf.control_dependencies([rank_assertions[i]]):
shape = tf.shape(image)
height = shape[0]
width = shape[1]
height_assert = tf.Assert(
tf.equal(height, image_height),
['Wrong height for tensor %s [expected][actual]',
image.name, height, image_height])
width_assert = tf.Assert(
tf.equal(width, image_width),
['Wrong width for tensor %s [expected][actual]',
image.name, width, image_width])
asserts.extend([height_assert, width_assert])
# Create a random bounding box.
#
# Use tf.random_uniform and not numpy.random.rand as doing the former would
# generate random numbers at graph eval time, unlike the latter which
# generates random numbers at graph definition time.
with tf.control_dependencies(asserts):
max_offset_height = tf.reshape(image_height - crop_height + 1, [])
with tf.control_dependencies(asserts):
max_offset_width = tf.reshape(image_width - crop_width + 1, [])
offset_height = tf.random_uniform(
[], maxval=max_offset_height, dtype=tf.int32)
offset_width = tf.random_uniform(
[], maxval=max_offset_width, dtype=tf.int32)
return [_crop(image, offset_height, offset_width,
crop_height, crop_width) for image in image_list]
def _central_crop(image_list, crop_height, crop_width):
"""Performs central crops of the given image list.
Args:
image_list: a list of image tensors of the same dimension but possibly
varying channel.
crop_height: the height of the image following the crop.
crop_width: the width of the image following the crop.
Returns:
the list of cropped images.
"""
outputs = []
for image in image_list:
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
offset_height = (image_height - crop_height) / 2
offset_width = (image_width - crop_width) / 2
outputs.append(_crop(image, offset_height, offset_width,
crop_height, crop_width))
return outputs
def _mean_image_subtraction(image, means):
"""Subtracts the given means from each image channel.
For example:
means = [123.68, 116.779, 103.939]
image = _mean_image_subtraction(image, means)
Note that the rank of `image` must be known.
Args:
image: a tensor of size [height, width, C].
means: a C-vector of values to subtract from each channel.
Returns:
the centered image.
Raises:
ValueError: If the rank of `image` is unknown, if `image` has a rank other
than three or if the number of channels in `image` doesn't match the
number of values in `means`.
"""
if image.get_shape().ndims != 3:
raise ValueError('Input must be of size [height, width, C>0]')
num_channels = image.get_shape().as_list()[-1]
if len(means) != num_channels:
raise ValueError('len(means) must match the number of channels')
channels = tf.split(axis=2, num_or_size_splits=num_channels, value=image)
for i in range(num_channels):
channels[i] -= means[i]
return tf.concat(axis=2, values=channels)
def _smallest_size_at_least(height, width, smallest_side):
"""Computes new shape with the smallest side equal to `smallest_side`.
Computes new shape with the smallest side equal to `smallest_side` while
preserving the original aspect ratio.
Args:
height: an int32 scalar tensor indicating the current height.
width: an int32 scalar tensor indicating the current width.
smallest_side: A python integer or scalar `Tensor` indicating the size of
the smallest side after resize.
Returns:
new_height: an int32 scalar tensor indicating the new height.
 new_width: an int32 scalar tensor indicating the new width.
"""
smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)
height = tf.to_float(height)
width = tf.to_float(width)
smallest_side = tf.to_float(smallest_side)
scale = tf.cond(tf.greater(height, width),
lambda: smallest_side / width,
lambda: smallest_side / height)
new_height = tf.to_int32(tf.rint(height * scale))
new_width = tf.to_int32(tf.rint(width * scale))
return new_height, new_width
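# Worked example of the arithmetic above (illustrative only): for a 400x600
# image and smallest_side=256, height < width, so scale = 256 / 400 = 0.64,
# new_height = rint(400 * 0.64) = 256 and new_width = rint(600 * 0.64) = 384;
# the aspect ratio is preserved while the smallest side becomes 256.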
def _aspect_preserving_resize(image, smallest_side):
"""Resize images preserving the original aspect ratio.
Args:
image: A 3-D image `Tensor`.
smallest_side: A python integer or scalar `Tensor` indicating the size of
the smallest side after resize.
Returns:
resized_image: A 3-D tensor containing the resized image.
"""
smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)
shape = tf.shape(image)
height = shape[0]
width = shape[1]
new_height, new_width = _smallest_size_at_least(height, width, smallest_side)
image = tf.expand_dims(image, 0)
resized_image = tf.image.resize_bilinear(image, [new_height, new_width],
align_corners=False)
resized_image = tf.squeeze(resized_image)
resized_image.set_shape([None, None, 3])
return resized_image
def preprocess_for_train(image,
output_height,
output_width,
resize_side_min=_RESIZE_SIDE_MIN,
resize_side_max=_RESIZE_SIDE_MAX,
use_grayscale=False):
"""Preprocesses the given image for training.
Note that the actual resizing scale is sampled from
[`resize_size_min`, `resize_size_max`].
Args:
image: A `Tensor` representing an image of arbitrary size.
output_height: The height of the image after preprocessing.
output_width: The width of the image after preprocessing.
resize_side_min: The lower bound for the smallest side of the image for
aspect-preserving resizing.
resize_side_max: The upper bound for the smallest side of the image for
aspect-preserving resizing.
use_grayscale: Whether to convert the image from RGB to grayscale.
Returns:
A preprocessed image.
"""
resize_side = tf.random_uniform(
[], minval=resize_side_min, maxval=resize_side_max+1, dtype=tf.int32)
image = _aspect_preserving_resize(image, resize_side)
image = _random_crop([image], output_height, output_width)[0]
image.set_shape([output_height, output_width, 3])
image = tf.to_float(image)
if use_grayscale:
image = tf.image.rgb_to_grayscale(image)
image = tf.image.random_flip_left_right(image)
return _mean_image_subtraction(image, [_R_MEAN, _G_MEAN, _B_MEAN])
def preprocess_for_eval(image,
output_height,
output_width,
resize_side,
use_grayscale=False):
"""Preprocesses the given image for evaluation.
Args:
image: A `Tensor` representing an image of arbitrary size.
output_height: The height of the image after preprocessing.
output_width: The width of the image after preprocessing.
resize_side: The smallest side of the image for aspect-preserving resizing.
use_grayscale: Whether to convert the image from RGB to grayscale.
Returns:
A preprocessed image.
"""
image = _aspect_preserving_resize(image, resize_side)
image = _central_crop([image], output_height, output_width)[0]
image.set_shape([output_height, output_width, 3])
image = tf.to_float(image)
if use_grayscale:
image = tf.image.rgb_to_grayscale(image)
return _mean_image_subtraction(image, [_R_MEAN, _G_MEAN, _B_MEAN])
def preprocess_image(image,
output_height,
output_width,
is_training=False,
resize_side_min=_RESIZE_SIDE_MIN,
resize_side_max=_RESIZE_SIDE_MAX,
use_grayscale=False):
"""Preprocesses the given image.
Args:
image: A `Tensor` representing an image of arbitrary size.
output_height: The height of the image after preprocessing.
output_width: The width of the image after preprocessing.
is_training: `True` if we're preprocessing the image for training and
`False` otherwise.
resize_side_min: The lower bound for the smallest side of the image for
aspect-preserving resizing. If `is_training` is `False`, then this value
is used for rescaling.
resize_side_max: The upper bound for the smallest side of the image for
aspect-preserving resizing. If `is_training` is `False`, this value is
ignored. Otherwise, the resize side is sampled from
[resize_size_min, resize_size_max].
use_grayscale: Whether to convert the image from RGB to grayscale.
Returns:
A preprocessed image.
"""
if is_training:
return preprocess_for_train(image, output_height, output_width,
resize_side_min, resize_side_max,
use_grayscale)
else:
return preprocess_for_eval(image, output_height, output_width,
resize_side_min, use_grayscale)
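# Illustrative usage sketch (not part of the upstream module): VGG/ResNet-style
# preprocessing at the usual 224x224 crop. The `image` argument and sizes are
# assumptions for the example; training samples a resize side from
# [_RESIZE_SIDE_MIN, _RESIZE_SIDE_MAX] while evaluation uses _RESIZE_SIDE_MIN.
def _example_vgg_preprocessing(image):
  """Returns (train_image, eval_image) for a decoded RGB image tensor."""
  train_image = preprocess_image(image, 224, 224, is_training=True)
  eval_image = preprocess_image(image, 224, 224, is_training=False)
  return train_image, eval_image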
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/slim/preprocessing/vgg_preprocessing.py | vgg_preprocessing.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Downloads and converts MNIST data to TFRecords of TF-Example protos.
This module downloads the MNIST data, uncompresses it, reads the files
that make up the MNIST data and creates two TFRecord datasets: one for train
and one for test. Each TFRecord dataset is comprised of a set of TF-Example
protocol buffers, each of which contain a single image and label.
The script should take about a minute to run.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import sys
import numpy as np
from six.moves import urllib
import tensorflow.compat.v1 as tf
from datasets import dataset_utils
# The URLs where the MNIST data can be downloaded.
_DATA_URL = 'http://yann.lecun.com/exdb/mnist/'
_TRAIN_DATA_FILENAME = 'train-images-idx3-ubyte.gz'
_TRAIN_LABELS_FILENAME = 'train-labels-idx1-ubyte.gz'
_TEST_DATA_FILENAME = 't10k-images-idx3-ubyte.gz'
_TEST_LABELS_FILENAME = 't10k-labels-idx1-ubyte.gz'
_IMAGE_SIZE = 28
_NUM_CHANNELS = 1
# The names of the classes.
_CLASS_NAMES = [
'zero',
'one',
'two',
'three',
'four',
'five',
 'six',
'seven',
'eight',
'nine',
]
def _extract_images(filename, num_images):
"""Extract the images into a numpy array.
Args:
filename: The path to an MNIST images file.
num_images: The number of images in the file.
Returns:
A numpy array of shape [number_of_images, height, width, channels].
"""
print('Extracting images from: ', filename)
with gzip.open(filename) as bytestream:
bytestream.read(16)
buf = bytestream.read(
_IMAGE_SIZE * _IMAGE_SIZE * num_images * _NUM_CHANNELS)
data = np.frombuffer(buf, dtype=np.uint8)
data = data.reshape(num_images, _IMAGE_SIZE, _IMAGE_SIZE, _NUM_CHANNELS)
return data
def _extract_labels(filename, num_labels):
"""Extract the labels into a vector of int64 label IDs.
Args:
filename: The path to an MNIST labels file.
num_labels: The number of labels in the file.
Returns:
A numpy array of shape [number_of_labels]
"""
print('Extracting labels from: ', filename)
with gzip.open(filename) as bytestream:
bytestream.read(8)
buf = bytestream.read(1 * num_labels)
labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64)
return labels
def _add_to_tfrecord(data_filename, labels_filename, num_images,
tfrecord_writer):
"""Loads data from the binary MNIST files and writes files to a TFRecord.
Args:
data_filename: The filename of the MNIST images.
labels_filename: The filename of the MNIST labels.
num_images: The number of images in the dataset.
tfrecord_writer: The TFRecord writer to use for writing.
"""
images = _extract_images(data_filename, num_images)
labels = _extract_labels(labels_filename, num_images)
shape = (_IMAGE_SIZE, _IMAGE_SIZE, _NUM_CHANNELS)
with tf.Graph().as_default():
image = tf.placeholder(dtype=tf.uint8, shape=shape)
encoded_png = tf.image.encode_png(image)
with tf.Session('') as sess:
for j in range(num_images):
sys.stdout.write('\r>> Converting image %d/%d' % (j + 1, num_images))
sys.stdout.flush()
png_string = sess.run(encoded_png, feed_dict={image: images[j]})
example = dataset_utils.image_to_tfexample(
png_string, 'png'.encode(), _IMAGE_SIZE, _IMAGE_SIZE, labels[j])
tfrecord_writer.write(example.SerializeToString())
def _get_output_filename(dataset_dir, split_name):
"""Creates the output filename.
Args:
dataset_dir: The directory where the temporary files are stored.
split_name: The name of the train/test split.
Returns:
An absolute file path.
"""
return '%s/mnist_%s.tfrecord' % (dataset_dir, split_name)
def _download_dataset(dataset_dir):
"""Downloads MNIST locally.
Args:
dataset_dir: The directory where the temporary files are stored.
"""
for filename in [_TRAIN_DATA_FILENAME,
_TRAIN_LABELS_FILENAME,
_TEST_DATA_FILENAME,
_TEST_LABELS_FILENAME]:
filepath = os.path.join(dataset_dir, filename)
if not os.path.exists(filepath):
print('Downloading file %s...' % filename)
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %.1f%%' % (
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(_DATA_URL + filename,
filepath,
_progress)
print()
with tf.gfile.GFile(filepath) as f:
size = f.size()
print('Successfully downloaded', filename, size, 'bytes.')
def _clean_up_temporary_files(dataset_dir):
"""Removes temporary files used to create the dataset.
Args:
dataset_dir: The directory where the temporary files are stored.
"""
for filename in [_TRAIN_DATA_FILENAME,
_TRAIN_LABELS_FILENAME,
_TEST_DATA_FILENAME,
_TEST_LABELS_FILENAME]:
filepath = os.path.join(dataset_dir, filename)
tf.gfile.Remove(filepath)
def run(dataset_dir):
"""Runs the download and conversion operation.
Args:
dataset_dir: The dataset directory where the dataset is stored.
"""
if not tf.gfile.Exists(dataset_dir):
tf.gfile.MakeDirs(dataset_dir)
training_filename = _get_output_filename(dataset_dir, 'train')
testing_filename = _get_output_filename(dataset_dir, 'test')
if tf.gfile.Exists(training_filename) and tf.gfile.Exists(testing_filename):
print('Dataset files already exist. Exiting without re-creating them.')
return
_download_dataset(dataset_dir)
# First, process the training data:
with tf.python_io.TFRecordWriter(training_filename) as tfrecord_writer:
data_filename = os.path.join(dataset_dir, _TRAIN_DATA_FILENAME)
labels_filename = os.path.join(dataset_dir, _TRAIN_LABELS_FILENAME)
_add_to_tfrecord(data_filename, labels_filename, 60000, tfrecord_writer)
# Next, process the testing data:
with tf.python_io.TFRecordWriter(testing_filename) as tfrecord_writer:
data_filename = os.path.join(dataset_dir, _TEST_DATA_FILENAME)
labels_filename = os.path.join(dataset_dir, _TEST_LABELS_FILENAME)
_add_to_tfrecord(data_filename, labels_filename, 10000, tfrecord_writer)
# Finally, write the labels file:
labels_to_class_names = dict(zip(range(len(_CLASS_NAMES)), _CLASS_NAMES))
dataset_utils.write_label_file(labels_to_class_names, dataset_dir)
_clean_up_temporary_files(dataset_dir)
print('\nFinished converting the MNIST dataset!')
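# Illustrative usage sketch (not part of the upstream module): the conversion
# is driven by calling run() with a writable directory, e.g.
#
#   from datasets import download_and_convert_mnist
#   download_and_convert_mnist.run('/tmp/mnist')
#
# The path is a placeholder for the example; the call downloads the four MNIST
# archives, writes mnist_train.tfrecord and mnist_test.tfrecord plus a labels
# file, and then removes the downloaded archives.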
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/slim/datasets/download_and_convert_mnist.py | download_and_convert_mnist.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides data for the Cifar10 dataset.
The dataset scripts used to create the dataset can be found at:
tensorflow/models/research/slim/datasets/download_and_convert_cifar10.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow.compat.v1 as tf
import tf_slim as slim
from datasets import dataset_utils
_FILE_PATTERN = 'cifar10_%s.tfrecord'
SPLITS_TO_SIZES = {'train': 50000, 'test': 10000}
_NUM_CLASSES = 10
_ITEMS_TO_DESCRIPTIONS = {
'image': 'A [32 x 32 x 3] color image.',
'label': 'A single integer between 0 and 9',
}
def get_split(split_name, dataset_dir, file_pattern=None, reader=None):
"""Gets a dataset tuple with instructions for reading cifar10.
Args:
split_name: A train/test split name.
dataset_dir: The base directory of the dataset sources.
file_pattern: The file pattern to use when matching the dataset sources.
It is assumed that the pattern contains a '%s' string so that the split
name can be inserted.
reader: The TensorFlow reader type.
Returns:
A `Dataset` namedtuple.
Raises:
ValueError: if `split_name` is not a valid train/test split.
"""
if split_name not in SPLITS_TO_SIZES:
raise ValueError('split name %s was not recognized.' % split_name)
if not file_pattern:
file_pattern = _FILE_PATTERN
file_pattern = os.path.join(dataset_dir, file_pattern % split_name)
# Allowing None in the signature so that dataset_factory can use the default.
if not reader:
reader = tf.TFRecordReader
keys_to_features = {
'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
'image/format': tf.FixedLenFeature((), tf.string, default_value='png'),
'image/class/label': tf.FixedLenFeature(
[], tf.int64, default_value=tf.zeros([], dtype=tf.int64)),
}
items_to_handlers = {
'image': slim.tfexample_decoder.Image(shape=[32, 32, 3]),
'label': slim.tfexample_decoder.Tensor('image/class/label'),
}
decoder = slim.tfexample_decoder.TFExampleDecoder(
keys_to_features, items_to_handlers)
labels_to_names = None
if dataset_utils.has_labels(dataset_dir):
labels_to_names = dataset_utils.read_label_file(dataset_dir)
return slim.dataset.Dataset(
data_sources=file_pattern,
reader=reader,
decoder=decoder,
num_samples=SPLITS_TO_SIZES[split_name],
items_to_descriptions=_ITEMS_TO_DESCRIPTIONS,
num_classes=_NUM_CLASSES,
labels_to_names=labels_to_names)
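# Illustrative usage sketch (not part of the upstream module): obtain the
# training split and read decoded examples through tf_slim's
# DatasetDataProvider (assumed to be the standard slim provider API). The
# dataset directory is a placeholder path for the example.
def _example_read_cifar10(dataset_dir='/tmp/cifar10'):
  """Returns (image, label) tensors for the 'train' split."""
  dataset = get_split('train', dataset_dir)
  provider = slim.dataset_data_provider.DatasetDataProvider(dataset,
                                                            shuffle=True)
  image, label = provider.get(['image', 'label'])
  return image, label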
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/slim/datasets/cifar10.py | cifar10.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides data for the ImageNet ILSVRC 2012 Dataset plus some bounding boxes.
Some images have one or more bounding boxes associated with the label of the
image. See details here: http://image-net.org/download-bboxes
ImageNet is based upon WordNet 3.0. To uniquely identify a synset, we use
"WordNet ID" (wnid), which is a concatenation of POS ( i.e. part of speech )
and SYNSET OFFSET of WordNet. For more information, please refer to the
WordNet documentation[http://wordnet.princeton.edu/wordnet/documentation/].
"There are bounding boxes for over 3000 popular synsets available.
For each synset, there are on average 150 images with bounding boxes."
 WARNING: Don't use for object detection; in this case all the bounding boxes
of the image belong to just one class.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from six.moves import urllib
import tensorflow.compat.v1 as tf
import tf_slim as slim
from datasets import dataset_utils
# TODO(nsilberman): Add tfrecord file type once the script is updated.
_FILE_PATTERN = '%s-*'
_SPLITS_TO_SIZES = {
'train': 1281167,
'validation': 50000,
}
_ITEMS_TO_DESCRIPTIONS = {
'image': 'A color image of varying height and width.',
'label': 'The label id of the image, integer between 0 and 999',
'label_text': 'The text of the label.',
'object/bbox': 'A list of bounding boxes.',
'object/label': 'A list of labels, one per each object.',
}
_NUM_CLASSES = 1001
# If set to false, will not try to set label_to_names in dataset
# by reading them from labels.txt or github.
LOAD_READABLE_NAMES = True
def create_readable_names_for_imagenet_labels():
"""Create a dict mapping label id to human readable string.
Returns:
 labels_to_names: dictionary where keys are integers from 0 to 1000
and values are human-readable names.
We retrieve a synset file, which contains a list of valid synset labels used
 by ILSVRC competition. There is one synset per line, e.g.
# n01440764
# n01443537
We also retrieve a synset_to_human_file, which contains a mapping from synsets
to human-readable names for every synset in Imagenet. These are stored in a
tsv format, as follows:
# n02119247 black fox
# n02119359 silver fox
We assign each synset (in alphabetical order) an integer, starting from 1
(since 0 is reserved for the background class).
Code is based on
https://github.com/tensorflow/models/blob/master/research/inception/inception/data/build_imagenet_data.py#L463
"""
# pylint: disable=g-line-too-long
base_url = 'https://raw.githubusercontent.com/tensorflow/models/master/research/slim/datasets/'
synset_url = '{}/imagenet_lsvrc_2015_synsets.txt'.format(base_url)
synset_to_human_url = '{}/imagenet_metadata.txt'.format(base_url)
filename, _ = urllib.request.urlretrieve(synset_url)
synset_list = [s.strip() for s in open(filename).readlines()]
num_synsets_in_ilsvrc = len(synset_list)
assert num_synsets_in_ilsvrc == 1000
filename, _ = urllib.request.urlretrieve(synset_to_human_url)
synset_to_human_list = open(filename).readlines()
num_synsets_in_all_imagenet = len(synset_to_human_list)
assert num_synsets_in_all_imagenet == 21842
synset_to_human = {}
for s in synset_to_human_list:
parts = s.strip().split('\t')
assert len(parts) == 2
synset = parts[0]
human = parts[1]
synset_to_human[synset] = human
label_index = 1
labels_to_names = {0: 'background'}
for synset in synset_list:
name = synset_to_human[synset]
labels_to_names[label_index] = name
label_index += 1
return labels_to_names
def get_split(split_name, dataset_dir, file_pattern=None, reader=None):
"""Gets a dataset tuple with instructions for reading ImageNet.
Args:
split_name: A train/test split name.
dataset_dir: The base directory of the dataset sources.
file_pattern: The file pattern to use when matching the dataset sources.
It is assumed that the pattern contains a '%s' string so that the split
name can be inserted.
reader: The TensorFlow reader type.
Returns:
A `Dataset` namedtuple.
Raises:
ValueError: if `split_name` is not a valid train/test split.
"""
if split_name not in _SPLITS_TO_SIZES:
raise ValueError('split name %s was not recognized.' % split_name)
if not file_pattern:
file_pattern = _FILE_PATTERN
file_pattern = os.path.join(dataset_dir, file_pattern % split_name)
# Allowing None in the signature so that dataset_factory can use the default.
if reader is None:
reader = tf.TFRecordReader
keys_to_features = {
'image/encoded': tf.FixedLenFeature(
(), tf.string, default_value=''),
'image/format': tf.FixedLenFeature(
(), tf.string, default_value='jpeg'),
'image/class/label': tf.FixedLenFeature(
[], dtype=tf.int64, default_value=-1),
'image/class/text': tf.FixedLenFeature(
[], dtype=tf.string, default_value=''),
'image/object/bbox/xmin': tf.VarLenFeature(
dtype=tf.float32),
'image/object/bbox/ymin': tf.VarLenFeature(
dtype=tf.float32),
'image/object/bbox/xmax': tf.VarLenFeature(
dtype=tf.float32),
'image/object/bbox/ymax': tf.VarLenFeature(
dtype=tf.float32),
'image/object/class/label': tf.VarLenFeature(
dtype=tf.int64),
}
items_to_handlers = {
'image': slim.tfexample_decoder.Image('image/encoded', 'image/format'),
'label': slim.tfexample_decoder.Tensor('image/class/label'),
'label_text': slim.tfexample_decoder.Tensor('image/class/text'),
'object/bbox': slim.tfexample_decoder.BoundingBox(
['ymin', 'xmin', 'ymax', 'xmax'], 'image/object/bbox/'),
'object/label': slim.tfexample_decoder.Tensor('image/object/class/label'),
}
decoder = slim.tfexample_decoder.TFExampleDecoder(
keys_to_features, items_to_handlers)
labels_to_names = None
if LOAD_READABLE_NAMES:
if dataset_utils.has_labels(dataset_dir):
labels_to_names = dataset_utils.read_label_file(dataset_dir)
else:
labels_to_names = create_readable_names_for_imagenet_labels()
dataset_utils.write_label_file(labels_to_names, dataset_dir)
return slim.dataset.Dataset(
data_sources=file_pattern,
reader=reader,
decoder=decoder,
num_samples=_SPLITS_TO_SIZES[split_name],
items_to_descriptions=_ITEMS_TO_DESCRIPTIONS,
num_classes=_NUM_CLASSES,
labels_to_names=labels_to_names)
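# Illustrative sketch (not part of the original module): reading one
# image/label pair from this split with a TF-Slim data provider. It assumes
# tf_slim exposes dataset_data_provider as in the original TF-Slim API, and
# that the (hypothetical) directory below contains the ImageNet TFRecords.
def _example_read_one_sample(dataset_dir='/tmp/imagenet'):
  """Builds tensors for a single ImageNet training sample (illustrative)."""
  dataset = get_split('train', dataset_dir)
  provider = slim.dataset_data_provider.DatasetDataProvider(
      dataset, num_readers=1, shuffle=False)
  image, label = provider.get(['image', 'label'])
  return image, label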
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/slim/datasets/imagenet.py | imagenet.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides data for the MNIST dataset.
The dataset scripts used to create the dataset can be found at:
tensorflow/models/research/slim/datasets/download_and_convert_mnist.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow.compat.v1 as tf
import tf_slim as slim
from datasets import dataset_utils
_FILE_PATTERN = 'mnist_%s.tfrecord'
_SPLITS_TO_SIZES = {'train': 60000, 'test': 10000}
_NUM_CLASSES = 10
_ITEMS_TO_DESCRIPTIONS = {
'image': 'A [28 x 28 x 1] grayscale image.',
'label': 'A single integer between 0 and 9',
}
def get_split(split_name, dataset_dir, file_pattern=None, reader=None):
"""Gets a dataset tuple with instructions for reading MNIST.
Args:
split_name: A train/test split name.
dataset_dir: The base directory of the dataset sources.
file_pattern: The file pattern to use when matching the dataset sources.
It is assumed that the pattern contains a '%s' string so that the split
name can be inserted.
reader: The TensorFlow reader type.
Returns:
A `Dataset` namedtuple.
Raises:
ValueError: if `split_name` is not a valid train/test split.
"""
if split_name not in _SPLITS_TO_SIZES:
raise ValueError('split name %s was not recognized.' % split_name)
if not file_pattern:
file_pattern = _FILE_PATTERN
file_pattern = os.path.join(dataset_dir, file_pattern % split_name)
# Allowing None in the signature so that dataset_factory can use the default.
if reader is None:
reader = tf.TFRecordReader
keys_to_features = {
'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
'image/format': tf.FixedLenFeature((), tf.string, default_value='raw'),
'image/class/label': tf.FixedLenFeature(
[1], tf.int64, default_value=tf.zeros([1], dtype=tf.int64)),
}
items_to_handlers = {
'image': slim.tfexample_decoder.Image(shape=[28, 28, 1], channels=1),
'label': slim.tfexample_decoder.Tensor('image/class/label', shape=[]),
}
decoder = slim.tfexample_decoder.TFExampleDecoder(
keys_to_features, items_to_handlers)
labels_to_names = None
if dataset_utils.has_labels(dataset_dir):
labels_to_names = dataset_utils.read_label_file(dataset_dir)
return slim.dataset.Dataset(
data_sources=file_pattern,
reader=reader,
decoder=decoder,
num_samples=_SPLITS_TO_SIZES[split_name],
num_classes=_NUM_CLASSES,
items_to_descriptions=_ITEMS_TO_DESCRIPTIONS,
labels_to_names=labels_to_names)
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/slim/datasets/mnist.py | mnist.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A factory-pattern class which returns classification image/label pairs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datasets import cifar10
from datasets import flowers
from datasets import imagenet
from datasets import mnist
from datasets import visualwakewords
datasets_map = {
'cifar10': cifar10,
'flowers': flowers,
'imagenet': imagenet,
'mnist': mnist,
'visualwakewords': visualwakewords,
}
def get_dataset(name, split_name, dataset_dir, file_pattern=None, reader=None):
"""Given a dataset name and a split_name returns a Dataset.
Args:
name: String, the name of the dataset.
split_name: A train/test split name.
dataset_dir: The directory where the dataset files are stored.
file_pattern: The file pattern to use for matching the dataset source files.
reader: The subclass of tf.ReaderBase. If left as `None`, then the default
reader defined by each dataset is used.
Returns:
A `Dataset` class.
Raises:
ValueError: If the dataset `name` is unknown.
"""
if name not in datasets_map:
raise ValueError('Name of dataset unknown %s' % name)
return datasets_map[name].get_split(
split_name,
dataset_dir,
file_pattern,
reader)
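# Illustrative usage sketch (not part of the original module); the directory is
# hypothetical and must already contain the TFRecords produced by the matching
# download_and_convert script:
#
#   from datasets import dataset_factory
#   dataset = dataset_factory.get_dataset('flowers', 'train', '/tmp/flowers')
#   dataset.num_classes   # 5
#   dataset.num_samples   # 3320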
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/slim/datasets/dataset_factory.py | dataset_factory.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Downloads and converts Flowers data to TFRecords of TF-Example protos.
This module downloads the Flowers data, uncompresses it, reads the files
that make up the Flowers data and creates two TFRecord datasets: one for train
and one for test. Each TFRecord dataset is comprised of a set of TF-Example
protocol buffers, each of which contains a single image and label.
The script should take about a minute to run.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import random
import sys
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
from datasets import dataset_utils
# The URL where the Flowers data can be downloaded.
_DATA_URL = 'http://download.tensorflow.org/example_images/flower_photos.tgz'
# The number of images in the validation set.
_NUM_VALIDATION = 350
# Seed for repeatability.
_RANDOM_SEED = 0
# The number of shards per dataset split.
_NUM_SHARDS = 5
class ImageReader(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Initializes function that decodes RGB JPEG data.
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
def read_image_dims(self, sess, image_data):
image = self.decode_jpeg(sess, image_data)
return image.shape[0], image.shape[1]
def decode_jpeg(self, sess, image_data):
image = sess.run(self._decode_jpeg,
feed_dict={self._decode_jpeg_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
def _get_filenames_and_classes(dataset_dir):
"""Returns a list of filenames and inferred class names.
Args:
dataset_dir: A directory containing a set of subdirectories representing
class names. Each subdirectory should contain PNG or JPG encoded images.
Returns:
    A list of image file paths under `dataset_dir`, and the sorted list of
    subdirectory names, each representing a class name.
"""
flower_root = os.path.join(dataset_dir, 'flower_photos')
directories = []
class_names = []
for filename in os.listdir(flower_root):
path = os.path.join(flower_root, filename)
if os.path.isdir(path):
directories.append(path)
class_names.append(filename)
photo_filenames = []
for directory in directories:
for filename in os.listdir(directory):
path = os.path.join(directory, filename)
photo_filenames.append(path)
return photo_filenames, sorted(class_names)
def _get_dataset_filename(dataset_dir, split_name, shard_id):
output_filename = 'flowers_%s_%05d-of-%05d.tfrecord' % (
split_name, shard_id, _NUM_SHARDS)
return os.path.join(dataset_dir, output_filename)
def _convert_dataset(split_name, filenames, class_names_to_ids, dataset_dir):
"""Converts the given filenames to a TFRecord dataset.
Args:
split_name: The name of the dataset, either 'train' or 'validation'.
filenames: A list of absolute paths to png or jpg images.
class_names_to_ids: A dictionary from class names (strings) to ids
(integers).
dataset_dir: The directory where the converted datasets are stored.
"""
assert split_name in ['train', 'validation']
num_per_shard = int(math.ceil(len(filenames) / float(_NUM_SHARDS)))
with tf.Graph().as_default():
image_reader = ImageReader()
with tf.Session('') as sess:
for shard_id in range(_NUM_SHARDS):
output_filename = _get_dataset_filename(
dataset_dir, split_name, shard_id)
with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer:
start_ndx = shard_id * num_per_shard
end_ndx = min((shard_id+1) * num_per_shard, len(filenames))
for i in range(start_ndx, end_ndx):
sys.stdout.write('\r>> Converting image %d/%d shard %d' % (
i+1, len(filenames), shard_id))
sys.stdout.flush()
# Read the filename:
image_data = tf.gfile.GFile(filenames[i], 'rb').read()
height, width = image_reader.read_image_dims(sess, image_data)
class_name = os.path.basename(os.path.dirname(filenames[i]))
class_id = class_names_to_ids[class_name]
example = dataset_utils.image_to_tfexample(
image_data, b'jpg', height, width, class_id)
tfrecord_writer.write(example.SerializeToString())
sys.stdout.write('\n')
sys.stdout.flush()
def _clean_up_temporary_files(dataset_dir):
"""Removes temporary files used to create the dataset.
Args:
dataset_dir: The directory where the temporary files are stored.
"""
filename = _DATA_URL.split('/')[-1]
filepath = os.path.join(dataset_dir, filename)
tf.gfile.Remove(filepath)
tmp_dir = os.path.join(dataset_dir, 'flower_photos')
tf.gfile.DeleteRecursively(tmp_dir)
def _dataset_exists(dataset_dir):
for split_name in ['train', 'validation']:
for shard_id in range(_NUM_SHARDS):
output_filename = _get_dataset_filename(
dataset_dir, split_name, shard_id)
if not tf.gfile.Exists(output_filename):
return False
return True
def run(dataset_dir):
"""Runs the download and conversion operation.
Args:
dataset_dir: The dataset directory where the dataset is stored.
"""
if not tf.gfile.Exists(dataset_dir):
tf.gfile.MakeDirs(dataset_dir)
if _dataset_exists(dataset_dir):
print('Dataset files already exist. Exiting without re-creating them.')
return
dataset_utils.download_and_uncompress_tarball(_DATA_URL, dataset_dir)
photo_filenames, class_names = _get_filenames_and_classes(dataset_dir)
class_names_to_ids = dict(
list(zip(class_names, list(range(len(class_names))))))
# Divide into train and test:
random.seed(_RANDOM_SEED)
random.shuffle(photo_filenames)
training_filenames = photo_filenames[_NUM_VALIDATION:]
validation_filenames = photo_filenames[:_NUM_VALIDATION]
# First, convert the training and validation sets.
_convert_dataset('train', training_filenames, class_names_to_ids,
dataset_dir)
_convert_dataset('validation', validation_filenames, class_names_to_ids,
dataset_dir)
# Finally, write the labels file:
labels_to_class_names = dict(
list(zip(list(range(len(class_names))), class_names)))
dataset_utils.write_label_file(labels_to_class_names, dataset_dir)
_clean_up_temporary_files(dataset_dir)
print('\nFinished converting the Flowers dataset!')
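# Illustrative sketch (not part of the original module): invoking the
# conversion directly from Python instead of download_and_convert_data.py.
# The directory below is hypothetical.
#
#   from datasets import download_and_convert_flowers
#   download_and_convert_flowers.run('/tmp/flowers')
#
# This writes flowers_train_00000-of-00005.tfrecord (and the remaining train
# and validation shards) plus a labels.txt file into the given directory.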
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/slim/datasets/download_and_convert_flowers.py | download_and_convert_flowers.py |
#!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Process the ImageNet Challenge bounding boxes for TensorFlow model training.
This script is called as
process_bounding_boxes.py <dir> [synsets-file]
Where <dir> is a directory containing the downloaded and unpacked bounding box
data. If [synsets-file] is supplied, then only the bounding boxes whose
synsets are contained within this file are returned. Note that the
[synsets-file] file contains synset ids, one per line.
The script dumps out a CSV text file in which each line contains an entry.
n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
The entry can be read as:
<JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
The bounding box for <JPEG file name> contains two points (xmin, ymin) and
(xmax, ymax) specifying the lower-left corner and upper-right corner of a
bounding box in *relative* coordinates.
The user supplies a directory where the XML files reside. The directory
structure in the directory <dir> is assumed to look like this:
<dir>/nXXXXXXXX/nXXXXXXXX_YYYY.xml
Each XML file contains a bounding box annotation. The script:
(1) Parses the XML file and extracts the filename, label and bounding box info.
(2) The bounding box is specified in the XML files as integer (xmin, ymin) and
(xmax, ymax) *relative* to image size displayed to the human annotator. The
size of the image displayed to the human annotator is stored in the XML file
as integer (height, width).
Note that the displayed size will differ from the actual size of the image
downloaded from image-net.org. To make the bounding box annotation usable,
we convert the bounding box coordinates to floating point numbers relative to
the displayed height and width of the image.
Note that each XML file might contain N bounding box annotations.
Note that the points are all clamped at a range of [0.0, 1.0] because some
human annotations extend outside the range of the supplied image.
See details here: http://image-net.org/download-bboxes
(3) By default, the script outputs all valid bounding boxes. If a
[synsets-file] is supplied, only the subset of bounding boxes associated
with those synsets are outputted. Importantly, one can supply a list of
synsets in the ImageNet Challenge and output the list of bounding boxes
associated with the training images of the ILSVRC.
We use these bounding boxes to inform the random distortion of images
supplied to the network.
If you run this script successfully, you will see the following output
to stderr:
> Finished processing 544546 XML files.
> Skipped 0 XML files not in ImageNet Challenge.
> Skipped 0 bounding boxes not in ImageNet Challenge.
> Wrote 615299 bounding boxes from 544546 annotated images.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os.path
import sys
import xml.etree.ElementTree as ET
from six.moves import xrange # pylint: disable=redefined-builtin
class BoundingBox(object):
pass
def GetItem(name, root, index=0):
count = 0
for item in root.iter(name):
if count == index:
return item.text
count += 1
# Failed to find "index" occurrence of item.
return -1
def GetInt(name, root, index=0):
return int(GetItem(name, root, index))
def FindNumberBoundingBoxes(root):
index = 0
while True:
if GetInt('xmin', root, index) == -1:
break
index += 1
return index
def ProcessXMLAnnotation(xml_file):
"""Process a single XML file containing a bounding box."""
# pylint: disable=broad-except
try:
tree = ET.parse(xml_file)
except Exception:
print('Failed to parse: ' + xml_file, file=sys.stderr)
return None
# pylint: enable=broad-except
root = tree.getroot()
num_boxes = FindNumberBoundingBoxes(root)
boxes = []
for index in xrange(num_boxes):
box = BoundingBox()
# Grab the 'index' annotation.
box.xmin = GetInt('xmin', root, index)
box.ymin = GetInt('ymin', root, index)
box.xmax = GetInt('xmax', root, index)
box.ymax = GetInt('ymax', root, index)
box.width = GetInt('width', root)
box.height = GetInt('height', root)
box.filename = GetItem('filename', root) + '.JPEG'
box.label = GetItem('name', root)
xmin = float(box.xmin) / float(box.width)
xmax = float(box.xmax) / float(box.width)
ymin = float(box.ymin) / float(box.height)
ymax = float(box.ymax) / float(box.height)
# Some images contain bounding box annotations that
# extend outside of the supplied image. See, e.g.
# n03127925/n03127925_147.xml
# Additionally, for some bounding boxes, the min > max
# or the box is entirely outside of the image.
min_x = min(xmin, xmax)
max_x = max(xmin, xmax)
box.xmin_scaled = min(max(min_x, 0.0), 1.0)
box.xmax_scaled = min(max(max_x, 0.0), 1.0)
min_y = min(ymin, ymax)
max_y = max(ymin, ymax)
box.ymin_scaled = min(max(min_y, 0.0), 1.0)
box.ymax_scaled = min(max(max_y, 0.0), 1.0)
boxes.append(box)
return boxes
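# Worked example of the scaling and clamping above (illustrative, made-up
# numbers): an annotation with xmin=-12, xmax=620 on an image displayed at
# width=600 gives relative values -0.02 and ~1.033, which are clamped to the
# final xmin_scaled=0.0 and xmax_scaled=1.0.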
if __name__ == '__main__':
if len(sys.argv) < 2 or len(sys.argv) > 3:
print('Invalid usage\n'
'usage: process_bounding_boxes.py <dir> [synsets-file]',
file=sys.stderr)
sys.exit(-1)
xml_files = glob.glob(sys.argv[1] + '/*/*.xml')
print('Identified %d XML files in %s' % (len(xml_files), sys.argv[1]),
file=sys.stderr)
if len(sys.argv) == 3:
labels = set([l.strip() for l in open(sys.argv[2]).readlines()])
print('Identified %d synset IDs in %s' % (len(labels), sys.argv[2]),
file=sys.stderr)
else:
labels = None
skipped_boxes = 0
skipped_files = 0
saved_boxes = 0
saved_files = 0
for file_index, one_file in enumerate(xml_files):
# Example: <...>/n06470073/n00141669_6790.xml
label = os.path.basename(os.path.dirname(one_file))
# Determine if the annotation is from an ImageNet Challenge label.
if labels is not None and label not in labels:
skipped_files += 1
continue
bboxes = ProcessXMLAnnotation(one_file)
assert bboxes is not None, 'No bounding boxes found in ' + one_file
found_box = False
for bbox in bboxes:
if labels is not None:
if bbox.label != label:
# Note: There is a slight bug in the bounding box annotation data.
# Many of the dog labels have the human label 'Scottish_deerhound'
# instead of the synset ID 'n02092002' in the bbox.label field. As a
# simple hack to overcome this issue, we only exclude bbox labels
          # *which are synset IDs* that do not match the original synset label for
# the XML file.
if bbox.label in labels:
skipped_boxes += 1
continue
# Guard against improperly specified boxes.
if (bbox.xmin_scaled >= bbox.xmax_scaled or
bbox.ymin_scaled >= bbox.ymax_scaled):
skipped_boxes += 1
continue
# Note bbox.filename occasionally contains '%s' in the name. This is
# data set noise that is fixed by just using the basename of the XML file.
image_filename = os.path.splitext(os.path.basename(one_file))[0]
print('%s.JPEG,%.4f,%.4f,%.4f,%.4f' %
(image_filename,
bbox.xmin_scaled, bbox.ymin_scaled,
bbox.xmax_scaled, bbox.ymax_scaled))
saved_boxes += 1
found_box = True
if found_box:
saved_files += 1
else:
skipped_files += 1
if not file_index % 5000:
print('--> processed %d of %d XML files.' %
(file_index + 1, len(xml_files)),
file=sys.stderr)
print('--> skipped %d boxes and %d XML files.' %
(skipped_boxes, skipped_files), file=sys.stderr)
print('Finished processing %d XML files.' % len(xml_files), file=sys.stderr)
print('Skipped %d XML files not in ImageNet Challenge.' % skipped_files,
file=sys.stderr)
print('Skipped %d bounding boxes not in ImageNet Challenge.' % skipped_boxes,
file=sys.stderr)
print('Wrote %d bounding boxes from %d annotated images.' %
(saved_boxes, saved_files),
file=sys.stderr)
print('Finished.', file=sys.stderr)
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/slim/datasets/process_bounding_boxes.py | process_bounding_boxes.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Downloads and converts VisualWakewords data to TFRecords of TF-Example protos.
This module downloads the COCO dataset, uncompresses it, and derives the
VisualWakeWords dataset from it to create two TFRecord datasets: one for
train and one for test. Each TFRecord dataset is comprised of a set of
TF-Example protocol buffers, each of which contains a single image and label.
The script should take several minutes to run.
Please note that this tool creates sharded output files.
VisualWakeWords dataset is used to design tiny models classifying two classes,
such as person/not-person. The two steps to generate the VisualWakeWords
dataset from the COCO dataset are given below:
1. Use COCO annotations to create VisualWakeWords annotations:
Note: A bounding box is 'valid' if it has the foreground_class_of_interest
(e.g. person) and its area is greater than 0.5% of the image area.
The resulting annotations file has the following fields, where 'images' are
the same as COCO dataset. 'categories' only contains information about the
foreground_class_of_interest (e.g. person) and 'annotations' maps an image to
objects (a list of valid bounding boxes) and label (value is 1 if it has
at least one valid bounding box, otherwise 0)
images[{
"id", "width", "height", "file_name", "flickr_url", "coco_url",
"license", "date_captured",
}]
categories{
"id": {"id", "name", "supercategory"}
}
annotations{
"image_id": {"objects":[{"area", "bbox" : [x,y,width,height]}], "label"}
}
2. Use VisualWakeWords annotations to create TFRecords:
The resulting TFRecord file contains the following features:
{ image/height, image/width, image/source_id, image/encoded,
image/class/label_text, image/class/label,
image/object/class/text,
image/object/bbox/ymin, image/object/bbox/xmin, image/object/bbox/ymax,
image/object/bbox/xmax, image/object/area
image/filename, image/format, image/key/sha256}
For classification models, you need the image/encoded and image/class/label.
Example usage:
Run download_and_convert_data.py in the parent directory as follows:
python download_and_convert_visualwakewords.py --logtostderr \
--dataset_name=visualwakewords \
--dataset_dir="${DATASET_DIR}" \
--small_object_area_threshold=0.005 \
--foreground_class_of_interest='person'
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow.compat.v1 as tf
from datasets import download_and_convert_visualwakewords_lib
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.flags.DEFINE_string(
'coco_dirname', 'coco_dataset',
    'A subdirectory in the visualwakewords dataset directory '
    'containing the coco dataset')
FLAGS = tf.app.flags.FLAGS
def run(dataset_dir, small_object_area_threshold, foreground_class_of_interest):
"""Runs the download and conversion operation.
Args:
dataset_dir: The dataset directory where the dataset is stored.
    small_object_area_threshold: Threshold on the fraction of the image area
      below which small objects are filtered out.
foreground_class_of_interest: Build a binary classifier based on the
presence or absence of this object in the image.
"""
# 1. Download the coco dataset into a subdirectory under the visualwakewords
# dataset directory
coco_dir = os.path.join(dataset_dir, FLAGS.coco_dirname)
if not tf.gfile.IsDirectory(coco_dir):
tf.gfile.MakeDirs(coco_dir)
download_and_convert_visualwakewords_lib.download_coco_dataset(coco_dir)
# Path to COCO annotations
train_annotations_file = os.path.join(coco_dir, 'annotations',
'instances_train2014.json')
val_annotations_file = os.path.join(coco_dir, 'annotations',
'instances_val2014.json')
train_image_dir = os.path.join(coco_dir, 'train2014')
val_image_dir = os.path.join(coco_dir, 'val2014')
# Path to VisualWakeWords annotations
visualwakewords_annotations_train = os.path.join(
dataset_dir, 'instances_visualwakewords_train2014.json')
visualwakewords_annotations_val = os.path.join(
dataset_dir, 'instances_visualwakewords_val2014.json')
visualwakewords_labels_filename = os.path.join(dataset_dir, 'labels.txt')
train_output_path = os.path.join(dataset_dir, 'train.record')
val_output_path = os.path.join(dataset_dir, 'val.record')
# 2. Create a labels file
tf.logging.info('Creating a labels file...')
download_and_convert_visualwakewords_lib.create_labels_file(
foreground_class_of_interest, visualwakewords_labels_filename)
# 3. Use COCO annotations to create VisualWakeWords annotations
tf.logging.info('Creating train VisualWakeWords annotations...')
download_and_convert_visualwakewords_lib.create_visual_wakeword_annotations(
train_annotations_file, visualwakewords_annotations_train,
small_object_area_threshold, foreground_class_of_interest)
tf.logging.info('Creating validation VisualWakeWords annotations...')
download_and_convert_visualwakewords_lib.create_visual_wakeword_annotations(
val_annotations_file, visualwakewords_annotations_val,
small_object_area_threshold, foreground_class_of_interest)
# 4. Use VisualWakeWords annotations to create the TFRecords
tf.logging.info('Creating train TFRecords for VisualWakeWords dataset...')
download_and_convert_visualwakewords_lib.create_tf_record_for_visualwakewords_dataset(
visualwakewords_annotations_train,
train_image_dir,
train_output_path,
num_shards=100)
tf.logging.info(
'Creating validation TFRecords for VisualWakeWords dataset...')
download_and_convert_visualwakewords_lib.create_tf_record_for_visualwakewords_dataset(
visualwakewords_annotations_val,
val_image_dir,
val_output_path,
num_shards=10)
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/slim/datasets/download_and_convert_visualwakewords.py | download_and_convert_visualwakewords.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides data for Visual WakeWords Dataset with images+labels.
The Visual WakeWords dataset is derived from the COCO dataset and is intended
for tiny models that classify two classes, such as person/not-person. The COCO
annotations are filtered to two classes: person and not-person (or another
user-defined category). Bounding boxes for small objects with area less than
0.5% of the image area are filtered out.
See build_visualwakewords_data.py which generates the Visual WakeWords dataset
annotations from the raw COCO dataset and converts them to TFRecord.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow.compat.v1 as tf
import tf_slim as slim
from datasets import dataset_utils
_FILE_PATTERN = '%s.record-*'
_SPLITS_TO_SIZES = {
'train': 82783,
'val': 40504,
}
_ITEMS_TO_DESCRIPTIONS = {
'image': 'A color image of varying height and width.',
'label': 'The label id of the image, an integer in {0, 1}',
'object/bbox': 'A list of bounding boxes.',
}
_NUM_CLASSES = 2
# labels file
LABELS_FILENAME = 'labels.txt'
def get_split(split_name, dataset_dir, file_pattern=None, reader=None):
"""Gets a dataset tuple with instructions for reading ImageNet.
Args:
split_name: A train/test split name.
dataset_dir: The base directory of the dataset sources.
file_pattern: The file pattern to use when matching the dataset sources. It
is assumed that the pattern contains a '%s' string so that the split name
can be inserted.
reader: The TensorFlow reader type.
Returns:
A `Dataset` namedtuple.
Raises:
ValueError: if `split_name` is not a valid train/test split.
"""
if split_name not in _SPLITS_TO_SIZES:
raise ValueError('split name %s was not recognized.' % split_name)
if not file_pattern:
file_pattern = _FILE_PATTERN
file_pattern = os.path.join(dataset_dir, file_pattern % split_name)
# Allowing None in the signature so that dataset_factory can use the default.
if reader is None:
reader = tf.TFRecordReader
keys_to_features = {
'image/encoded':
tf.FixedLenFeature((), tf.string, default_value=''),
'image/format':
tf.FixedLenFeature((), tf.string, default_value='jpeg'),
'image/class/label':
tf.FixedLenFeature([], dtype=tf.int64, default_value=-1),
'image/object/bbox/xmin':
tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymin':
tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/xmax':
tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymax':
tf.VarLenFeature(dtype=tf.float32),
}
items_to_handlers = {
'image':
slim.tfexample_decoder.Image('image/encoded', 'image/format'),
'label':
slim.tfexample_decoder.Tensor('image/class/label'),
'object/bbox':
slim.tfexample_decoder.BoundingBox(['ymin', 'xmin', 'ymax', 'xmax'],
'image/object/bbox/'),
}
decoder = slim.tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
labels_to_names = None
labels_file = os.path.join(dataset_dir, LABELS_FILENAME)
if tf.gfile.Exists(labels_file):
labels_to_names = dataset_utils.read_label_file(dataset_dir)
return slim.dataset.Dataset(
data_sources=file_pattern,
reader=reader,
decoder=decoder,
num_samples=_SPLITS_TO_SIZES[split_name],
items_to_descriptions=_ITEMS_TO_DESCRIPTIONS,
num_classes=_NUM_CLASSES,
labels_to_names=labels_to_names)
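# Illustrative note (not part of the original module): in addition to 'image'
# and 'label', this split also exposes the per-image bounding boxes through
# the 'object/bbox' item, decoded as a [num_boxes, 4] tensor in
# [ymin, xmin, ymax, xmax] order (see the BoundingBox handler above).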
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/slim/datasets/visualwakewords.py | visualwakewords.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains utilities for downloading and converting datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import tarfile
import zipfile
from six.moves import urllib
import tensorflow.compat.v1 as tf
LABELS_FILENAME = 'labels.txt'
def int64_feature(values):
"""Returns a TF-Feature of int64s.
Args:
values: A scalar or list of values.
Returns:
A TF-Feature.
"""
if not isinstance(values, (tuple, list)):
values = [values]
return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
def bytes_list_feature(values):
"""Returns a TF-Feature of list of bytes.
Args:
values: A string or list of strings.
Returns:
A TF-Feature.
"""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=values))
def float_list_feature(values):
"""Returns a TF-Feature of list of floats.
Args:
values: A float or list of floats.
Returns:
A TF-Feature.
"""
return tf.train.Feature(float_list=tf.train.FloatList(value=values))
def bytes_feature(values):
"""Returns a TF-Feature of bytes.
Args:
values: A string.
Returns:
A TF-Feature.
"""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[values]))
def float_feature(values):
"""Returns a TF-Feature of floats.
Args:
    values: A scalar or list of values.
Returns:
A TF-Feature.
"""
if not isinstance(values, (tuple, list)):
values = [values]
return tf.train.Feature(float_list=tf.train.FloatList(value=values))
def image_to_tfexample(image_data, image_format, height, width, class_id):
return tf.train.Example(features=tf.train.Features(feature={
'image/encoded': bytes_feature(image_data),
'image/format': bytes_feature(image_format),
'image/class/label': int64_feature(class_id),
'image/height': int64_feature(height),
'image/width': int64_feature(width),
}))
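# Illustrative sketch (not part of the original module): encoding a single
# image file with image_to_tfexample and writing it to a TFRecord. The paths
# passed in are hypothetical.
def _example_write_single_record(image_path, class_id, height, width,
                                 output_path):
  """Writes one JPEG image as a TF-Example to `output_path` (illustrative)."""
  image_data = tf.gfile.GFile(image_path, 'rb').read()
  example = image_to_tfexample(image_data, b'jpg', height, width, class_id)
  with tf.python_io.TFRecordWriter(output_path) as writer:
    writer.write(example.SerializeToString())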
def download_url(url, dataset_dir):
"""Downloads the tarball or zip file from url into filepath.
Args:
url: The URL of a tarball or zip file.
dataset_dir: The directory where the temporary files are stored.
Returns:
filepath: path where the file is downloaded.
"""
filename = url.split('/')[-1]
filepath = os.path.join(dataset_dir, filename)
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (
filename, float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(url, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
return filepath
def download_and_uncompress_tarball(tarball_url, dataset_dir):
"""Downloads the `tarball_url` and uncompresses it locally.
Args:
tarball_url: The URL of a tarball file.
dataset_dir: The directory where the temporary files are stored.
"""
filepath = download_url(tarball_url, dataset_dir)
tarfile.open(filepath, 'r:gz').extractall(dataset_dir)
def download_and_uncompress_zipfile(zip_url, dataset_dir):
"""Downloads the `zip_url` and uncompresses it locally.
Args:
zip_url: The URL of a zip file.
dataset_dir: The directory where the temporary files are stored.
"""
filename = zip_url.split('/')[-1]
filepath = os.path.join(dataset_dir, filename)
if tf.gfile.Exists(filepath):
    print('File {filename} has already been downloaded at {filepath}. '
          'Unzipping it...'.format(filename=filename, filepath=filepath))
else:
filepath = download_url(zip_url, dataset_dir)
with zipfile.ZipFile(filepath, 'r') as zip_file:
for member in zip_file.namelist():
memberpath = os.path.join(dataset_dir, member)
# extract only if file doesn't exist
if not (os.path.exists(memberpath) or os.path.isfile(memberpath)):
zip_file.extract(member, dataset_dir)
def write_label_file(labels_to_class_names,
dataset_dir,
filename=LABELS_FILENAME):
"""Writes a file with the list of class names.
Args:
labels_to_class_names: A map of (integer) labels to class names.
dataset_dir: The directory in which the labels file should be written.
filename: The filename where the class names are written.
"""
labels_filename = os.path.join(dataset_dir, filename)
with tf.gfile.Open(labels_filename, 'w') as f:
for label in labels_to_class_names:
class_name = labels_to_class_names[label]
f.write('%d:%s\n' % (label, class_name))
def has_labels(dataset_dir, filename=LABELS_FILENAME):
"""Specifies whether or not the dataset directory contains a label map file.
Args:
dataset_dir: The directory in which the labels file is found.
filename: The filename where the class names are written.
Returns:
`True` if the labels file exists and `False` otherwise.
"""
return tf.gfile.Exists(os.path.join(dataset_dir, filename))
def read_label_file(dataset_dir, filename=LABELS_FILENAME):
"""Reads the labels file and returns a mapping from ID to class name.
Args:
dataset_dir: The directory in which the labels file is found.
filename: The filename where the class names are written.
Returns:
A map from a label (integer) to class name.
"""
labels_filename = os.path.join(dataset_dir, filename)
with tf.gfile.Open(labels_filename, 'rb') as f:
lines = f.read().decode()
lines = lines.split('\n')
lines = filter(None, lines)
labels_to_class_names = {}
for line in lines:
index = line.index(':')
labels_to_class_names[int(line[:index])] = line[index+1:]
return labels_to_class_names
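# Illustrative sketch (not part of the original module): round-tripping a
# small label map through write_label_file / read_label_file. The directory
# is hypothetical and must already exist.
def _example_label_file_round_trip(dataset_dir='/tmp/example_dataset'):
  """Writes a tiny label map and reads it back (illustrative only)."""
  write_label_file({0: 'daisy', 1: 'rose'}, dataset_dir)
  assert has_labels(dataset_dir)
  return read_label_file(dataset_dir)  # {0: 'daisy', 1: 'rose'}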
def open_sharded_output_tfrecords(exit_stack, base_path, num_shards):
"""Opens all TFRecord shards for writing and adds them to an exit stack.
Args:
    exit_stack: A contextlib2.ExitStack used to automatically close the TFRecords
opened in this function.
base_path: The base path for all shards
num_shards: The number of shards
Returns:
The list of opened TFRecords. Position k in the list corresponds to shard k.
"""
tf_record_output_filenames = [
'{}-{:05d}-of-{:05d}'.format(base_path, idx, num_shards)
for idx in range(num_shards)
]
tfrecords = [
exit_stack.enter_context(tf.python_io.TFRecordWriter(file_name))
for file_name in tf_record_output_filenames
]
return tfrecords
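# Illustrative sketch (not part of the original module): spreading a list of
# tf.train.Example protos round-robin across shards with the helper above,
# using the standard-library contextlib.ExitStack for cleanup.
def _example_write_sharded(examples, base_path, num_shards=10):
  """Writes `examples` across `num_shards` TFRecord files (illustrative)."""
  import contextlib
  with contextlib.ExitStack() as exit_stack:
    writers = open_sharded_output_tfrecords(exit_stack, base_path, num_shards)
    for index, example in enumerate(examples):
      writers[index % num_shards].write(example.SerializeToString())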
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/slim/datasets/dataset_utils.py | dataset_utils.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides data for the flowers dataset.
The dataset scripts used to create the dataset can be found at:
tensorflow/models/research/slim/datasets/download_and_convert_flowers.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow.compat.v1 as tf
import tf_slim as slim
from datasets import dataset_utils
_FILE_PATTERN = 'flowers_%s_*.tfrecord'
SPLITS_TO_SIZES = {'train': 3320, 'validation': 350}
_NUM_CLASSES = 5
_ITEMS_TO_DESCRIPTIONS = {
'image': 'A color image of varying size.',
'label': 'A single integer between 0 and 4',
}
def get_split(split_name, dataset_dir, file_pattern=None, reader=None):
"""Gets a dataset tuple with instructions for reading flowers.
Args:
split_name: A train/validation split name.
dataset_dir: The base directory of the dataset sources.
file_pattern: The file pattern to use when matching the dataset sources.
It is assumed that the pattern contains a '%s' string so that the split
name can be inserted.
reader: The TensorFlow reader type.
Returns:
A `Dataset` namedtuple.
Raises:
ValueError: if `split_name` is not a valid train/validation split.
"""
if split_name not in SPLITS_TO_SIZES:
raise ValueError('split name %s was not recognized.' % split_name)
if not file_pattern:
file_pattern = _FILE_PATTERN
file_pattern = os.path.join(dataset_dir, file_pattern % split_name)
# Allowing None in the signature so that dataset_factory can use the default.
if reader is None:
reader = tf.TFRecordReader
keys_to_features = {
'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
'image/format': tf.FixedLenFeature((), tf.string, default_value='png'),
'image/class/label': tf.FixedLenFeature(
[], tf.int64, default_value=tf.zeros([], dtype=tf.int64)),
}
items_to_handlers = {
'image': slim.tfexample_decoder.Image(),
'label': slim.tfexample_decoder.Tensor('image/class/label'),
}
decoder = slim.tfexample_decoder.TFExampleDecoder(
keys_to_features, items_to_handlers)
labels_to_names = None
if dataset_utils.has_labels(dataset_dir):
labels_to_names = dataset_utils.read_label_file(dataset_dir)
return slim.dataset.Dataset(
data_sources=file_pattern,
reader=reader,
decoder=decoder,
num_samples=SPLITS_TO_SIZES[split_name],
items_to_descriptions=_ITEMS_TO_DESCRIPTIONS,
num_classes=_NUM_CLASSES,
labels_to_names=labels_to_names)
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/slim/datasets/flowers.py | flowers.py |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts ImageNet data to TFRecords file format with Example protos.
The raw ImageNet data set is expected to reside in JPEG files located in the
following directory structure.
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
...
where 'n01440764' is the unique synset label associated with
these images.
The training data set consists of 1000 sub-directories (i.e. labels)
each containing 1200 JPEG images for a total of 1.2M JPEG images.
The evaluation data set consists of 1000 sub-directories (i.e. labels)
each containing 50 JPEG images for a total of 50K JPEG images.
This TensorFlow script converts the training and evaluation data into
a sharded data set consisting of 1024 and 128 TFRecord files, respectively.
train_directory/train-00000-of-01024
train_directory/train-00001-of-01024
...
train_directory/train-00127-of-01024
and
validation_directory/validation-00000-of-00128
validation_directory/validation-00001-of-00128
...
validation_directory/validation-00127-of-00128
Each validation TFRecord file contains ~390 records. Each training TFRecord
file contains ~1250 records. Each record within the TFRecord file is a
serialized Example proto. The Example proto contains the following fields:
image/encoded: string containing JPEG encoded image in RGB colorspace
image/height: integer, image height in pixels
image/width: integer, image width in pixels
image/colorspace: string, specifying the colorspace, always 'RGB'
image/channels: integer, specifying the number of channels, always 3
  image/format: string, specifying the format, always 'JPEG'
image/filename: string containing the basename of the image file
e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG'
image/class/label: integer specifying the index in a classification layer.
The label ranges from [1, 1000] where 0 is not used.
image/class/synset: string specifying the unique ID of the label,
e.g. 'n01440764'
image/class/text: string specifying the human-readable version of the label
e.g. 'red fox, Vulpes vulpes'
  image/object/bbox/xmin: list of floats specifying the 0+ human annotated
    bounding boxes
  image/object/bbox/xmax: list of floats specifying the 0+ human annotated
    bounding boxes
  image/object/bbox/ymin: list of floats specifying the 0+ human annotated
    bounding boxes
  image/object/bbox/ymax: list of floats specifying the 0+ human annotated
    bounding boxes
image/object/bbox/label: integer specifying the index in a classification
layer. The label ranges from [1, 1000] where 0 is not used. Note this is
always identical to the image label.
Note that the length of xmin is identical to the length of xmax, ymin and ymax
for each example.
Running this script using 16 threads may take around 2.5 hours on an HP Z420.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow.compat.v1 as tf
tf.app.flags.DEFINE_string('train_directory', '/tmp/',
'Training data directory')
tf.app.flags.DEFINE_string('validation_directory', '/tmp/',
'Validation data directory')
tf.app.flags.DEFINE_string('output_directory', '/tmp/',
'Output data directory')
tf.app.flags.DEFINE_integer('train_shards', 1024,
'Number of shards in training TFRecord files.')
tf.app.flags.DEFINE_integer('validation_shards', 128,
'Number of shards in validation TFRecord files.')
tf.app.flags.DEFINE_integer('num_threads', 8,
'Number of threads to preprocess the images.')
# The labels file contains the list of valid labels.
# Assumes that the file contains entries as such:
# n01440764
# n01443537
# n01484850
# where each line corresponds to a label expressed as a synset. We map
# each synset contained in the file to an integer (based on the alphabetical
# ordering). See below for details.
tf.app.flags.DEFINE_string('labels_file',
'imagenet_lsvrc_2015_synsets.txt',
'Labels file')
# This file contains the mapping from synset to human-readable label.
# Assumes each line of the file looks like:
#
# n02119247 black fox
# n02119359 silver fox
# n02119477 red fox, Vulpes fulva
#
# where each line corresponds to a unique mapping. Note that each line is
# formatted as <synset>\t<human readable label>.
tf.app.flags.DEFINE_string('imagenet_metadata_file',
'imagenet_metadata.txt',
'ImageNet metadata file')
# This file is the output of process_bounding_box.py
# Assumes each line of the file looks like:
#
# n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
#
# where each line corresponds to one bounding box annotation associated
# with an image. Each line can be parsed as:
#
# <JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
#
# Note that there might exist multiple bounding box annotations associated
# with an image file.
tf.app.flags.DEFINE_string('bounding_box_file',
'./imagenet_2012_bounding_boxes.csv',
'Bounding box file')
FLAGS = tf.app.flags.FLAGS
def _int64_feature(value):
"""Wrapper for inserting int64 features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _float_feature(value):
"""Wrapper for inserting float features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def _bytes_feature(value):
"""Wrapper for inserting bytes features into Example proto."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _convert_to_example(filename, image_buffer, label, synset, human, bbox,
height, width):
"""Build an Example proto for an example.
Args:
filename: string, path to an image file, e.g., '/path/to/example.JPG'
image_buffer: string, JPEG encoding of RGB image
label: integer, identifier for the ground truth for the network
synset: string, unique WordNet ID specifying the label, e.g., 'n02323233'
human: string, human-readable label, e.g., 'red fox, Vulpes vulpes'
    bbox: list of bounding boxes; each box is a list of floats
specifying [xmin, ymin, xmax, ymax]. All boxes are assumed to belong to
the same label as the image label.
height: integer, image height in pixels
width: integer, image width in pixels
Returns:
Example proto
"""
xmin = []
ymin = []
xmax = []
ymax = []
for b in bbox:
assert len(b) == 4
# pylint: disable=expression-not-assigned
[l.append(point) for l, point in zip([xmin, ymin, xmax, ymax], b)]
# pylint: enable=expression-not-assigned
colorspace = 'RGB'
channels = 3
image_format = 'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': _int64_feature(height),
'image/width': _int64_feature(width),
'image/colorspace': _bytes_feature(colorspace),
'image/channels': _int64_feature(channels),
'image/class/label': _int64_feature(label),
'image/class/synset': _bytes_feature(synset),
'image/class/text': _bytes_feature(human),
'image/object/bbox/xmin': _float_feature(xmin),
'image/object/bbox/xmax': _float_feature(xmax),
'image/object/bbox/ymin': _float_feature(ymin),
'image/object/bbox/ymax': _float_feature(ymax),
'image/object/bbox/label': _int64_feature([label] * len(xmin)),
'image/format': _bytes_feature(image_format),
'image/filename': _bytes_feature(os.path.basename(filename)),
'image/encoded': _bytes_feature(image_buffer)}))
return example
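# Illustrative call with made-up values (not part of the original script);
# `image_buffer` would hold the raw JPEG bytes and each bounding box is given
# as [xmin, ymin, xmax, ymax] in relative coordinates:
#
#   example = _convert_to_example(
#       '/path/to/n02119022_1234.JPEG', image_buffer, label=7,
#       synset='n02119022', human='red fox, Vulpes vulpes',
#       bbox=[[0.1, 0.2, 0.8, 0.9]], height=375, width=500)
#   writer.write(example.SerializeToString())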
class ImageCoder(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Create a single Session to run all image coding calls.
self._sess = tf.Session()
# Initializes function that converts PNG to JPEG data.
self._png_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_png(self._png_data, channels=3)
self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that converts CMYK JPEG data to RGB JPEG data.
self._cmyk_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_jpeg(self._cmyk_data, channels=0)
self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that decodes RGB JPEG data.
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
def png_to_jpeg(self, image_data):
return self._sess.run(self._png_to_jpeg,
feed_dict={self._png_data: image_data})
def cmyk_to_rgb(self, image_data):
return self._sess.run(self._cmyk_to_rgb,
feed_dict={self._cmyk_data: image_data})
def decode_jpeg(self, image_data):
image = self._sess.run(self._decode_jpeg,
feed_dict={self._decode_jpeg_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
# File list from:
# https://groups.google.com/forum/embed/?place=forum/torch7#!topic/torch7/fOSTXHIESSU
return 'n02105855_2933.JPEG' in filename
def _is_cmyk(filename):
"""Determine if file contains a CMYK JPEG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a JPEG encoded with CMYK color space.
"""
# File list from:
# https://github.com/cytsai/ilsvrc-cmyk-image-list
blacklist = ['n01739381_1309.JPEG', 'n02077923_14822.JPEG',
'n02447366_23489.JPEG', 'n02492035_15739.JPEG',
'n02747177_10752.JPEG', 'n03018349_4028.JPEG',
'n03062245_4620.JPEG', 'n03347037_9675.JPEG',
'n03467068_12171.JPEG', 'n03529860_11437.JPEG',
'n03544143_17228.JPEG', 'n03633091_5218.JPEG',
'n03710637_5125.JPEG', 'n03961711_5286.JPEG',
'n04033995_2932.JPEG', 'n04258138_17003.JPEG',
'n04264628_27969.JPEG', 'n04336792_7448.JPEG',
'n04371774_5854.JPEG', 'n04596742_4225.JPEG',
'n07583066_647.JPEG', 'n13037406_4650.JPEG']
return filename.split('/')[-1] in blacklist
def _process_image(filename, coder):
"""Process a single image file.
Args:
filename: string, path to an image file e.g., '/path/to/example.JPG'.
coder: instance of ImageCoder to provide TensorFlow image coding utils.
Returns:
image_buffer: string, JPEG encoding of RGB image.
height: integer, image height in pixels.
width: integer, image width in pixels.
"""
# Read the image file.
  image_data = tf.gfile.GFile(filename, 'rb').read()
# Clean the dirty data.
if _is_png(filename):
# 1 image is a PNG.
print('Converting PNG to JPEG for %s' % filename)
image_data = coder.png_to_jpeg(image_data)
elif _is_cmyk(filename):
# 22 JPEG images are in CMYK colorspace.
print('Converting CMYK to RGB for %s' % filename)
image_data = coder.cmyk_to_rgb(image_data)
# Decode the RGB JPEG.
image = coder.decode_jpeg(image_data)
# Check that image converted to RGB
assert len(image.shape) == 3
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
return image_data, height, width
def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
synsets, labels, humans, bboxes, num_shards):
"""Processes and saves list of images as TFRecord in 1 thread.
Args:
coder: instance of ImageCoder to provide TensorFlow image coding utils.
    thread_index: integer, unique batch to run; index is within [0, len(ranges)).
ranges: list of pairs of integers specifying ranges of each batches to
analyze in parallel.
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
synsets: list of strings; each string is a unique WordNet ID
labels: list of integer; each integer identifies the ground truth
humans: list of strings; each string is a human-readable label
bboxes: list of bounding boxes for each image. Note that each entry in this
      list might contain 0 or more entries, corresponding to the number of bounding
box annotations for the image.
num_shards: integer number of shards for this data set.
"""
# Each thread produces N shards where N = int(num_shards / num_threads).
# For instance, if num_shards = 128, and the num_threads = 2, then the first
# thread would produce shards [0, 64).
num_threads = len(ranges)
assert not num_shards % num_threads
num_shards_per_batch = int(num_shards / num_threads)
shard_ranges = np.linspace(ranges[thread_index][0],
ranges[thread_index][1],
num_shards_per_batch + 1).astype(int)
num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
counter = 0
for s in xrange(num_shards_per_batch):
# Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
shard = thread_index * num_shards_per_batch + s
output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
output_file = os.path.join(FLAGS.output_directory, output_filename)
writer = tf.python_io.TFRecordWriter(output_file)
shard_counter = 0
files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
for i in files_in_shard:
filename = filenames[i]
label = labels[i]
synset = synsets[i]
human = humans[i]
bbox = bboxes[i]
image_buffer, height, width = _process_image(filename, coder)
example = _convert_to_example(filename, image_buffer, label,
synset, human, bbox,
height, width)
writer.write(example.SerializeToString())
shard_counter += 1
counter += 1
if not counter % 1000:
print('%s [thread %d]: Processed %d of %d images in thread batch.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
writer.close()
print('%s [thread %d]: Wrote %d images to %s' %
(datetime.now(), thread_index, shard_counter, output_file))
sys.stdout.flush()
shard_counter = 0
print('%s [thread %d]: Wrote %d images to %d shards.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
def _process_image_files(name, filenames, synsets, labels, humans,
bboxes, num_shards):
"""Process and save list of images as TFRecord of Example protos.
Args:
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
synsets: list of strings; each string is a unique WordNet ID
labels: list of integer; each integer identifies the ground truth
humans: list of strings; each string is a human-readable label
bboxes: list of bounding boxes for each image. Note that each entry in this
      list might contain 0 or more entries, corresponding to the number of bounding
box annotations for the image.
num_shards: integer number of shards for this data set.
"""
assert len(filenames) == len(synsets)
assert len(filenames) == len(labels)
assert len(filenames) == len(humans)
assert len(filenames) == len(bboxes)
  # Break all images into batches, each covering [ranges[i][0], ranges[i][1]).
  spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(int)
ranges = []
threads = []
for i in xrange(len(spacing) - 1):
ranges.append([spacing[i], spacing[i+1]])
# Launch a thread for each batch.
print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
sys.stdout.flush()
# Create a mechanism for monitoring when all threads are finished.
coord = tf.train.Coordinator()
# Create a generic TensorFlow-based utility for converting all image codings.
coder = ImageCoder()
threads = []
for thread_index in xrange(len(ranges)):
args = (coder, thread_index, ranges, name, filenames,
synsets, labels, humans, bboxes, num_shards)
t = threading.Thread(target=_process_image_files_batch, args=args)
t.start()
threads.append(t)
# Wait for all the threads to terminate.
coord.join(threads)
print('%s: Finished writing all %d images in data set.' %
(datetime.now(), len(filenames)))
sys.stdout.flush()
def _find_image_files(data_dir, labels_file):
"""Build a list of all images files and labels in the data set.
Args:
data_dir: string, path to the root directory of images.
Assumes that the ImageNet data set resides in JPEG files located in
the following directory structure.
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
where 'n01440764' is the unique synset label associated with these images.
labels_file: string, path to the labels file.
The list of valid labels are held in this file. Assumes that the file
contains entries as such:
n01440764
n01443537
n01484850
where each line corresponds to a label expressed as a synset. We map
each synset contained in the file to an integer (based on the alphabetical
ordering) starting with the integer 1 corresponding to the synset
contained in the first line.
The reason we start the integer labels at 1 is to reserve label 0 as an
unused background class.
Returns:
filenames: list of strings; each string is a path to an image file.
synsets: list of strings; each string is a unique WordNet ID.
    labels: list of integers; each integer identifies the ground truth.
"""
print('Determining list of input files and labels from %s.' % data_dir)
challenge_synsets = [
l.strip() for l in tf.gfile.GFile(labels_file, 'r').readlines()
]
labels = []
filenames = []
synsets = []
# Leave label index 0 empty as a background class.
label_index = 1
# Construct the list of JPEG files and labels.
for synset in challenge_synsets:
jpeg_file_path = '%s/%s/*.JPEG' % (data_dir, synset)
matching_files = tf.gfile.Glob(jpeg_file_path)
labels.extend([label_index] * len(matching_files))
synsets.extend([synset] * len(matching_files))
filenames.extend(matching_files)
if not label_index % 100:
print('Finished finding files in %d of %d classes.' % (
label_index, len(challenge_synsets)))
label_index += 1
# Shuffle the ordering of all image files in order to guarantee
# random ordering of the images with respect to label in the
# saved TFRecord files. Make the randomization repeatable.
  shuffled_index = list(range(len(filenames)))
random.seed(12345)
random.shuffle(shuffled_index)
filenames = [filenames[i] for i in shuffled_index]
synsets = [synsets[i] for i in shuffled_index]
labels = [labels[i] for i in shuffled_index]
print('Found %d JPEG files across %d labels inside %s.' %
(len(filenames), len(challenge_synsets), data_dir))
return filenames, synsets, labels
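# --- Illustrative sketch (not part of the original script) -------------------
# A minimal example of the repeatable shuffle used in _find_image_files: a
# seeded index permutation is applied to every parallel list so filenames,
# synsets and labels stay aligned. The values below are made up.
def _example_repeatable_shuffle():
  """Shuffles two aligned lists with a fixed seed and returns them."""
  filenames = ['a.JPEG', 'b.JPEG', 'c.JPEG']
  labels = [1, 2, 3]
  shuffled_index = list(range(len(filenames)))
  random.seed(12345)
  random.shuffle(shuffled_index)
  return ([filenames[i] for i in shuffled_index],
          [labels[i] for i in shuffled_index])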
def _find_human_readable_labels(synsets, synset_to_human):
"""Build a list of human-readable labels.
Args:
synsets: list of strings; each string is a unique WordNet ID.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red fox, Vulpes vulpes'
Returns:
List of human-readable strings corresponding to each synset.
"""
humans = []
for s in synsets:
assert s in synset_to_human, ('Failed to find: %s' % s)
humans.append(synset_to_human[s])
return humans
def _find_image_bounding_boxes(filenames, image_to_bboxes):
"""Find the bounding boxes for a given image file.
Args:
filenames: list of strings; each string is a path to an image file.
image_to_bboxes: dictionary mapping image file names to a list of
bounding boxes. This list contains 0+ bounding boxes.
Returns:
    List of bounding boxes for each image. Note that each entry in this
    list might contain 0 or more entries corresponding to the number of
    bounding box annotations for the image.
"""
num_image_bbox = 0
bboxes = []
for f in filenames:
basename = os.path.basename(f)
if basename in image_to_bboxes:
bboxes.append(image_to_bboxes[basename])
num_image_bbox += 1
else:
bboxes.append([])
print('Found %d images with bboxes out of %d images' % (
num_image_bbox, len(filenames)))
return bboxes
def _process_dataset(name, directory, num_shards, synset_to_human,
image_to_bboxes):
"""Process a complete data set and save it as a TFRecord.
Args:
name: string, unique identifier specifying the data set.
directory: string, root path to the data set.
num_shards: integer number of shards for this data set.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red fox, Vulpes vulpes'
image_to_bboxes: dictionary mapping image file names to a list of
bounding boxes. This list contains 0+ bounding boxes.
"""
filenames, synsets, labels = _find_image_files(directory, FLAGS.labels_file)
humans = _find_human_readable_labels(synsets, synset_to_human)
bboxes = _find_image_bounding_boxes(filenames, image_to_bboxes)
_process_image_files(name, filenames, synsets, labels,
humans, bboxes, num_shards)
def _build_synset_lookup(imagenet_metadata_file):
"""Build lookup for synset to human-readable label.
Args:
imagenet_metadata_file: string, path to file containing mapping from
synset to human-readable label.
Assumes each line of the file looks like:
n02119247 black fox
n02119359 silver fox
n02119477 red fox, Vulpes fulva
where each line corresponds to a unique mapping. Note that each line is
formatted as <synset>\t<human readable label>.
Returns:
Dictionary of synset to human labels, such as:
'n02119022' --> 'red fox, Vulpes vulpes'
"""
lines = tf.gfile.GFile(imagenet_metadata_file, 'r').readlines()
synset_to_human = {}
for l in lines:
if l:
parts = l.strip().split('\t')
assert len(parts) == 2
synset = parts[0]
human = parts[1]
synset_to_human[synset] = human
return synset_to_human
def _build_bounding_box_lookup(bounding_box_file):
"""Build a lookup from image file to bounding boxes.
Args:
bounding_box_file: string, path to file with bounding boxes annotations.
Assumes each line of the file looks like:
n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
where each line corresponds to one bounding box annotation associated
with an image. Each line can be parsed as:
<JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
    Note that there may be multiple bounding box annotations associated
    with an image file. This file is the output of process_bounding_boxes.py.
Returns:
Dictionary mapping image file names to a list of bounding boxes. This list
contains 0+ bounding boxes.
"""
lines = tf.gfile.GFile(bounding_box_file, 'r').readlines()
images_to_bboxes = {}
num_bbox = 0
num_image = 0
for l in lines:
if l:
parts = l.split(',')
assert len(parts) == 5, ('Failed to parse: %s' % l)
filename = parts[0]
xmin = float(parts[1])
ymin = float(parts[2])
xmax = float(parts[3])
ymax = float(parts[4])
box = [xmin, ymin, xmax, ymax]
if filename not in images_to_bboxes:
images_to_bboxes[filename] = []
num_image += 1
images_to_bboxes[filename].append(box)
num_bbox += 1
print('Successfully read %d bounding boxes '
'across %d images.' % (num_bbox, num_image))
return images_to_bboxes
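# --- Illustrative sketch (not part of the original script) -------------------
# Parsing a single bounding-box line in the format documented above,
# '<JPEG file name>,<xmin>,<ymin>,<xmax>,<ymax>'. The line below is made up.
def _example_parse_bbox_line():
  """Returns (filename, [xmin, ymin, xmax, ymax]) parsed from one CSV line."""
  line = 'n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940'
  parts = line.split(',')
  assert len(parts) == 5
  return parts[0], [float(v) for v in parts[1:]]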
def main(unused_argv):
assert not FLAGS.train_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
assert not FLAGS.validation_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with '
'FLAGS.validation_shards')
print('Saving results to %s' % FLAGS.output_directory)
# Build a map from synset to human-readable label.
synset_to_human = _build_synset_lookup(FLAGS.imagenet_metadata_file)
image_to_bboxes = _build_bounding_box_lookup(FLAGS.bounding_box_file)
# Run it!
_process_dataset('validation', FLAGS.validation_directory,
FLAGS.validation_shards, synset_to_human, image_to_bboxes)
_process_dataset('train', FLAGS.train_directory, FLAGS.train_shards,
synset_to_human, image_to_bboxes)
if __name__ == '__main__':
tf.app.run()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/slim/datasets/build_imagenet_data.py | build_imagenet_data.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Downloads and converts cifar10 data to TFRecords of TF-Example protos.
This module downloads the cifar10 data, uncompresses it, reads the files
that make up the cifar10 data and creates two TFRecord datasets: one for train
and one for test. Each TFRecord dataset is composed of a set of TF-Example
protocol buffers, each of which contains a single image and label.
The script should take several minutes to run.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import tarfile
import numpy as np
from six.moves import cPickle
from six.moves import urllib
import tensorflow.compat.v1 as tf
from datasets import dataset_utils
# The URL where the CIFAR data can be downloaded.
_DATA_URL = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
# The number of training files.
_NUM_TRAIN_FILES = 5
# The height and width of each image.
_IMAGE_SIZE = 32
# The names of the classes.
_CLASS_NAMES = [
'airplane',
'automobile',
'bird',
'cat',
'deer',
'dog',
'frog',
'horse',
'ship',
'truck',
]
def _add_to_tfrecord(filename, tfrecord_writer, offset=0):
"""Loads data from the cifar10 pickle files and writes files to a TFRecord.
Args:
filename: The filename of the cifar10 pickle file.
tfrecord_writer: The TFRecord writer to use for writing.
offset: An offset into the absolute number of images previously written.
Returns:
The new offset.
"""
with tf.gfile.Open(filename, 'rb') as f:
if sys.version_info < (3,):
data = cPickle.load(f)
else:
data = cPickle.load(f, encoding='bytes')
images = data[b'data']
num_images = images.shape[0]
images = images.reshape((num_images, 3, 32, 32))
labels = data[b'labels']
with tf.Graph().as_default():
image_placeholder = tf.placeholder(dtype=tf.uint8)
encoded_image = tf.image.encode_png(image_placeholder)
with tf.Session('') as sess:
for j in range(num_images):
sys.stdout.write('\r>> Reading file [%s] image %d/%d' % (
filename, offset + j + 1, offset + num_images))
sys.stdout.flush()
image = np.squeeze(images[j]).transpose((1, 2, 0))
label = labels[j]
png_string = sess.run(encoded_image,
feed_dict={image_placeholder: image})
example = dataset_utils.image_to_tfexample(
png_string, b'png', _IMAGE_SIZE, _IMAGE_SIZE, label)
tfrecord_writer.write(example.SerializeToString())
return offset + num_images
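# --- Illustrative sketch (not part of the original script) -------------------
# The pickled CIFAR-10 batches store each image as a flat row of 3072 bytes in
# channel-major (3, 32, 32) order; _add_to_tfrecord reshapes and transposes it
# to (32, 32, 3) before PNG encoding. The array below is synthetic.
def _example_cifar_row_to_hwc():
  """Converts one fake CIFAR-10 row to a (height, width, channels) array."""
  row = np.arange(3 * 32 * 32, dtype=np.uint8)  # stand-in for data[b'data'][j]
  image = row.reshape((3, 32, 32)).transpose((1, 2, 0))
  return image.shape  # (32, 32, 3)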
def _get_output_filename(dataset_dir, split_name):
"""Creates the output filename.
Args:
dataset_dir: The dataset directory where the dataset is stored.
split_name: The name of the train/test split.
Returns:
An absolute file path.
"""
return '%s/cifar10_%s.tfrecord' % (dataset_dir, split_name)
def _download_and_uncompress_dataset(dataset_dir):
"""Downloads cifar10 and uncompresses it locally.
Args:
dataset_dir: The directory where the temporary files are stored.
"""
filename = _DATA_URL.split('/')[-1]
filepath = os.path.join(dataset_dir, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (
filename, float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(_DATA_URL, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
tarfile.open(filepath, 'r:gz').extractall(dataset_dir)
def _clean_up_temporary_files(dataset_dir):
"""Removes temporary files used to create the dataset.
Args:
dataset_dir: The directory where the temporary files are stored.
"""
filename = _DATA_URL.split('/')[-1]
filepath = os.path.join(dataset_dir, filename)
tf.gfile.Remove(filepath)
tmp_dir = os.path.join(dataset_dir, 'cifar-10-batches-py')
tf.gfile.DeleteRecursively(tmp_dir)
def run(dataset_dir):
"""Runs the download and conversion operation.
Args:
dataset_dir: The dataset directory where the dataset is stored.
"""
if not tf.gfile.Exists(dataset_dir):
tf.gfile.MakeDirs(dataset_dir)
training_filename = _get_output_filename(dataset_dir, 'train')
testing_filename = _get_output_filename(dataset_dir, 'test')
if tf.gfile.Exists(training_filename) and tf.gfile.Exists(testing_filename):
print('Dataset files already exist. Exiting without re-creating them.')
return
dataset_utils.download_and_uncompress_tarball(_DATA_URL, dataset_dir)
# First, process the training data:
with tf.python_io.TFRecordWriter(training_filename) as tfrecord_writer:
offset = 0
for i in range(_NUM_TRAIN_FILES):
filename = os.path.join(dataset_dir,
'cifar-10-batches-py',
'data_batch_%d' % (i + 1)) # 1-indexed.
offset = _add_to_tfrecord(filename, tfrecord_writer, offset)
# Next, process the testing data:
with tf.python_io.TFRecordWriter(testing_filename) as tfrecord_writer:
filename = os.path.join(dataset_dir,
'cifar-10-batches-py',
'test_batch')
_add_to_tfrecord(filename, tfrecord_writer)
# Finally, write the labels file:
labels_to_class_names = dict(zip(range(len(_CLASS_NAMES)), _CLASS_NAMES))
dataset_utils.write_label_file(labels_to_class_names, dataset_dir)
_clean_up_temporary_files(dataset_dir)
print('\nFinished converting the Cifar10 dataset!')
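# --- Illustrative usage (not part of the original module) --------------------
# A minimal way to drive the conversion above; the output directory below is
# hypothetical. run() downloads CIFAR-10, writes cifar10_train.tfrecord and
# cifar10_test.tfrecord plus a labels file, then removes the temporary files.
def _example_run_cifar10_conversion():
  """Converts CIFAR-10 into TFRecords under a hypothetical directory."""
  run('/tmp/cifar10')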
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/slim/datasets/download_and_convert_cifar10.py | download_and_convert_cifar10.py |
123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/slim/datasets/__init__.py | __init__.py |
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Helper functions to generate the Visual WakeWords dataset.
It filters raw COCO annotations file to Visual WakeWords Dataset
annotations. The resulting annotations and COCO images are then converted
to TF records.
See download_and_convert_visualwakewords.py for the sample usage.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import hashlib
import io
import json
import os
import contextlib2
import PIL.Image
import six
import tensorflow.compat.v1 as tf
from datasets import dataset_utils
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.flags.DEFINE_string(
'coco_train_url',
'http://images.cocodataset.org/zips/train2014.zip',
'Link to zip file containing coco training data')
tf.app.flags.DEFINE_string(
'coco_validation_url',
'http://images.cocodataset.org/zips/val2014.zip',
'Link to zip file containing coco validation data')
tf.app.flags.DEFINE_string(
'coco_annotations_url',
'http://images.cocodataset.org/annotations/annotations_trainval2014.zip',
'Link to zip file containing coco annotation data')
FLAGS = tf.app.flags.FLAGS
def download_coco_dataset(dataset_dir):
"""Download the coco dataset.
Args:
dataset_dir: Path where coco dataset should be downloaded.
"""
dataset_utils.download_and_uncompress_zipfile(FLAGS.coco_train_url,
dataset_dir)
dataset_utils.download_and_uncompress_zipfile(FLAGS.coco_validation_url,
dataset_dir)
dataset_utils.download_and_uncompress_zipfile(FLAGS.coco_annotations_url,
dataset_dir)
def create_labels_file(foreground_class_of_interest,
visualwakewords_labels_file):
"""Generate visualwakewords labels file.
Args:
foreground_class_of_interest: category from COCO dataset that is filtered by
the visualwakewords dataset
visualwakewords_labels_file: output visualwakewords label file
"""
labels_to_class_names = {0: 'background', 1: foreground_class_of_interest}
with open(visualwakewords_labels_file, 'w') as fp:
for label in labels_to_class_names:
fp.write(str(label) + ':' + str(labels_to_class_names[label]) + '\n')
def create_visual_wakeword_annotations(annotations_file,
visualwakewords_annotations_file,
small_object_area_threshold,
foreground_class_of_interest):
"""Generate visual wakewords annotations file.
Loads COCO annotation json files to generate visualwakewords annotations file.
Args:
annotations_file: JSON file containing COCO bounding box annotations
visualwakewords_annotations_file: path to output annotations file
small_object_area_threshold: threshold on fraction of image area below which
small object bounding boxes are filtered
foreground_class_of_interest: category from COCO dataset that is filtered by
the visual wakewords dataset
"""
# default object of interest is person
foreground_class_of_interest_id = 1
with tf.gfile.GFile(annotations_file, 'r') as fid:
groundtruth_data = json.load(fid)
images = groundtruth_data['images']
# Create category index
category_index = {}
for category in groundtruth_data['categories']:
if category['name'] == foreground_class_of_interest:
foreground_class_of_interest_id = category['id']
category_index[category['id']] = category
# Create annotations index, a map of image_id to it's annotations
tf.logging.info('Building annotations index...')
annotations_index = collections.defaultdict(
lambda: collections.defaultdict(list))
# structure is { "image_id": {"objects" : [list of the image annotations]}}
for annotation in groundtruth_data['annotations']:
annotations_index[annotation['image_id']]['objects'].append(annotation)
missing_annotation_count = len(images) - len(annotations_index)
tf.logging.info('%d images are missing annotations.',
missing_annotation_count)
# Create filtered annotations index
annotations_index_filtered = {}
for idx, image in enumerate(images):
if idx % 100 == 0:
tf.logging.info('On image %d of %d', idx, len(images))
annotations = annotations_index[image['id']]
annotations_filtered = _filter_annotations(
annotations, image, small_object_area_threshold,
foreground_class_of_interest_id)
annotations_index_filtered[image['id']] = annotations_filtered
with open(visualwakewords_annotations_file, 'w') as fp:
json.dump(
{
'images': images,
'annotations': annotations_index_filtered,
'categories': category_index
}, fp)
def _filter_annotations(annotations, image, small_object_area_threshold,
foreground_class_of_interest_id):
"""Filters COCO annotations to visual wakewords annotations.
Args:
annotations: dicts with keys: {
u'objects': [{u'id', u'image_id', u'category_id', u'segmentation',
u'area', u'bbox' : [x,y,width,height], u'iscrowd'}] } Notice
that bounding box coordinates in the official COCO dataset
are given as [x, y, width, height] tuples using absolute
coordinates where x, y represent the top-left (0-indexed)
corner.
image: dict with keys: [u'license', u'file_name', u'coco_url', u'height',
u'width', u'date_captured', u'flickr_url', u'id']
small_object_area_threshold: threshold on fraction of image area below which
small objects are filtered
foreground_class_of_interest_id: category of COCO dataset which visual
wakewords filters
Returns:
annotations_filtered: dict with keys: {
u'objects': [{"area", "bbox" : [x,y,width,height]}],
u'label',
}
"""
objects = []
image_area = image['height'] * image['width']
for annotation in annotations['objects']:
normalized_object_area = annotation['area'] / image_area
category_id = int(annotation['category_id'])
# Filter valid bounding boxes
    if (category_id == foreground_class_of_interest_id and
        normalized_object_area > small_object_area_threshold):
objects.append({
u'area': annotation['area'],
u'bbox': annotation['bbox'],
})
label = 1 if objects else 0
return {
'objects': objects,
'label': label,
}
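# --- Illustrative sketch (not part of the original module) -------------------
# A toy call to _filter_annotations showing the small-object filter: only the
# first box, which covers more than the area threshold, survives, so the image
# gets label 1. All values below are made up.
def _example_filter_annotations():
  """Filters two fake COCO annotations and returns the Visual WakeWords dict."""
  annotations = {
      'objects': [
          {'category_id': 1, 'area': 5000.0, 'bbox': [10, 10, 100, 50]},
          {'category_id': 1, 'area': 20.0, 'bbox': [0, 0, 5, 4]},
      ]
  }
  image = {'height': 100, 'width': 200}
  return _filter_annotations(annotations, image,
                             small_object_area_threshold=0.005,
                             foreground_class_of_interest_id=1)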
def create_tf_record_for_visualwakewords_dataset(annotations_file, image_dir,
output_path, num_shards):
"""Loads Visual WakeWords annotations/images and converts to tf.Record format.
Args:
annotations_file: JSON file containing bounding box annotations.
image_dir: Directory containing the image files.
output_path: Path to output tf.Record file.
num_shards: number of output file shards.
"""
with contextlib2.ExitStack() as tf_record_close_stack, \
tf.gfile.GFile(annotations_file, 'r') as fid:
output_tfrecords = dataset_utils.open_sharded_output_tfrecords(
tf_record_close_stack, output_path, num_shards)
groundtruth_data = json.load(fid)
images = groundtruth_data['images']
annotations_index = groundtruth_data['annotations']
annotations_index = {int(k): v for k, v in six.iteritems(annotations_index)}
# convert 'unicode' key to 'int' key after we parse the json file
for idx, image in enumerate(images):
if idx % 100 == 0:
tf.logging.info('On image %d of %d', idx, len(images))
annotations = annotations_index[image['id']]
tf_example = _create_tf_example(image, annotations, image_dir)
shard_idx = idx % num_shards
output_tfrecords[shard_idx].write(tf_example.SerializeToString())
def _create_tf_example(image, annotations, image_dir):
"""Converts image and annotations to a tf.Example proto.
Args:
image: dict with keys: [u'license', u'file_name', u'coco_url', u'height',
u'width', u'date_captured', u'flickr_url', u'id']
annotations: dict with objects (a list of image annotations) and a label.
      {u'objects': [{"area", "bbox": [x, y, width, height]}], u'label'}. Notice
that bounding box coordinates in the COCO dataset are given as[x, y,
width, height] tuples using absolute coordinates where x, y represent
the top-left (0-indexed) corner. This function also converts to the format
that can be used by the Tensorflow Object Detection API (which is [ymin,
xmin, ymax, xmax] with coordinates normalized relative to image size).
image_dir: directory containing the image files.
Returns:
tf_example: The converted tf.Example
Raises:
ValueError: if the image pointed to by data['filename'] is not a valid JPEG
"""
image_height = image['height']
image_width = image['width']
filename = image['file_name']
image_id = image['id']
full_path = os.path.join(image_dir, filename)
with tf.gfile.GFile(full_path, 'rb') as fid:
encoded_jpg = fid.read()
encoded_jpg_io = io.BytesIO(encoded_jpg)
image = PIL.Image.open(encoded_jpg_io)
key = hashlib.sha256(encoded_jpg).hexdigest()
xmin, xmax, ymin, ymax, area = [], [], [], [], []
for obj in annotations['objects']:
(x, y, width, height) = tuple(obj['bbox'])
xmin.append(float(x) / image_width)
xmax.append(float(x + width) / image_width)
ymin.append(float(y) / image_height)
ymax.append(float(y + height) / image_height)
area.append(obj['area'])
feature_dict = {
'image/height':
dataset_utils.int64_feature(image_height),
'image/width':
dataset_utils.int64_feature(image_width),
'image/filename':
dataset_utils.bytes_feature(filename.encode('utf8')),
'image/source_id':
dataset_utils.bytes_feature(str(image_id).encode('utf8')),
'image/key/sha256':
dataset_utils.bytes_feature(key.encode('utf8')),
'image/encoded':
dataset_utils.bytes_feature(encoded_jpg),
'image/format':
dataset_utils.bytes_feature('jpeg'.encode('utf8')),
'image/class/label':
dataset_utils.int64_feature(annotations['label']),
'image/object/bbox/xmin':
dataset_utils.float_list_feature(xmin),
'image/object/bbox/xmax':
dataset_utils.float_list_feature(xmax),
'image/object/bbox/ymin':
dataset_utils.float_list_feature(ymin),
'image/object/bbox/ymax':
dataset_utils.float_list_feature(ymax),
'image/object/area':
dataset_utils.float_list_feature(area),
}
example = tf.train.Example(features=tf.train.Features(feature=feature_dict))
return example
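# --- Illustrative sketch (not part of the original module) -------------------
# The coordinate conversion performed in _create_tf_example, shown on a single
# made-up box: COCO's absolute [x, y, width, height] becomes normalized
# xmin/xmax/ymin/ymax fractions of the image size.
def _example_normalize_bbox():
  """Returns (xmin, xmax, ymin, ymax) for one fake COCO box."""
  image_width, image_height = 640, 480
  x, y, width, height = 64.0, 48.0, 320.0, 240.0  # hypothetical COCO bbox
  return (x / image_width, (x + width) / image_width,
          y / image_height, (y + height) / image_height)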
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/slim/datasets/download_and_convert_visualwakewords_lib.py | download_and_convert_visualwakewords_lib.py |
#!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Process the ImageNet Challenge bounding boxes for TensorFlow model training.
Associate the ImageNet 2012 Challenge validation data set with labels.
The raw ImageNet validation data set is expected to reside in JPEG files
located in the following directory structure.
data_dir/ILSVRC2012_val_00000001.JPEG
data_dir/ILSVRC2012_val_00000002.JPEG
...
data_dir/ILSVRC2012_val_00050000.JPEG
This script moves the files into a directory structure like such:
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
...
where 'n01440764' is the unique synset label associated with
these images.
This directory reorganization requires a mapping from validation image
number (i.e. suffix of the original file) to the associated label. This
is provided in the ImageNet development kit via a Matlab file.
In order to make life easier and divorce ourselves from Matlab, we instead
supply a custom text file that provides this mapping for us.
Sample usage:
./preprocess_imagenet_validation_data.py ILSVRC2012_img_val \
imagenet_2012_validation_synset_labels.txt
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
from six.moves import xrange # pylint: disable=redefined-builtin
if __name__ == '__main__':
if len(sys.argv) < 3:
print('Invalid usage\n'
'usage: preprocess_imagenet_validation_data.py '
'<validation data dir> <validation labels file>')
sys.exit(-1)
data_dir = sys.argv[1]
validation_labels_file = sys.argv[2]
# Read in the 50000 synsets associated with the validation data set.
labels = [l.strip() for l in open(validation_labels_file).readlines()]
unique_labels = set(labels)
# Make all sub-directories in the validation data dir.
for label in unique_labels:
labeled_data_dir = os.path.join(data_dir, label)
os.makedirs(labeled_data_dir)
# Move all of the image to the appropriate sub-directory.
for i in xrange(len(labels)):
basename = 'ILSVRC2012_val_000%.5d.JPEG' % (i + 1)
original_filename = os.path.join(data_dir, basename)
if not os.path.exists(original_filename):
print('Failed to find: ', original_filename)
sys.exit(-1)
new_filename = os.path.join(data_dir, labels[i], basename)
os.rename(original_filename, new_filename)
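# --- Illustrative sketch (not part of the original script) -------------------
# The helper below shows, with made-up values, the destination path built by
# the loop above: validation image i (0-based) is moved under the synset
# directory named by the i-th line of the labels file.
def _example_target_path():
  """Returns the destination path for a fake validation image."""
  data_dir, label, i = '/tmp/ILSVRC2012_img_val', 'n01440764', 292
  basename = 'ILSVRC2012_val_000%.5d.JPEG' % (i + 1)
  return os.path.join(data_dir, label, basename)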
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/slim/datasets/preprocess_imagenet_validation_data.py | preprocess_imagenet_validation_data.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Deploy Slim models across multiple clones and replicas.
# TODO(sguada) docstring paragraph by (a) motivating the need for the file and
# (b) defining clones.
# TODO(sguada) describe the high-level components of model deployment.
# E.g. "each model deployment is composed of several parts: a DeploymentConfig,
# which captures A, B and C, an input_fn which loads data.. etc
To easily train a model on multiple GPUs or across multiple machines this
module provides a set of helper functions: `create_clones`,
`optimize_clones` and `deploy`.
Usage:
g = tf.Graph()
# Set up DeploymentConfig
config = model_deploy.DeploymentConfig(num_clones=2, clone_on_cpu=True)
# Create the global step on the device storing the variables.
with tf.device(config.variables_device()):
global_step = slim.create_global_step()
# Define the inputs
with tf.device(config.inputs_device()):
images, labels = LoadData(...)
inputs_queue = slim.data.prefetch_queue((images, labels))
# Define the optimizer.
with tf.device(config.optimizer_device()):
optimizer = tf.train.MomentumOptimizer(FLAGS.learning_rate, FLAGS.momentum)
# Define the model including the loss.
def model_fn(inputs_queue):
images, labels = inputs_queue.dequeue()
predictions = CreateNetwork(images)
slim.losses.log_loss(predictions, labels)
model_dp = model_deploy.deploy(config, model_fn, [inputs_queue],
optimizer=optimizer)
# Run training.
slim.learning.train(model_dp.train_op, my_log_dir,
summary_op=model_dp.summary_op)
The Clone namedtuple holds together the values associated with each call to
model_fn:
* outputs: The return values of the calls to `model_fn()`.
* scope: The scope used to create the clone.
* device: The device used to create the clone.
DeployedModel namedtuple, holds together the values needed to train multiple
clones:
  * train_op: An operation that runs the optimizer training op and includes
    all the update ops created by `model_fn`. Present only if an optimizer
    was specified.
  * summary_op: An operation that runs the summaries created by `model_fn`
    and process_gradients.
* total_loss: A `Tensor` that contains the sum of all losses created by
`model_fn` plus the regularization losses.
* clones: List of `Clone` tuples returned by `create_clones()`.
DeploymentConfig parameters:
* num_clones: Number of model clones to deploy in each replica.
* clone_on_cpu: True if clones should be placed on CPU.
* replica_id: Integer. Index of the replica for which the model is
deployed. Usually 0 for the chief replica.
* num_replicas: Number of replicas to use.
* num_ps_tasks: Number of tasks for the `ps` job. 0 to not use replicas.
* worker_job_name: A name for the worker job.
* ps_job_name: A name for the parameter server job.
TODO(sguada):
- describe side effect to the graph.
- what happens to summaries and update_ops.
- which graph collections are altered.
- write a tutorial on how to use this.
- analyze the possibility of calling deploy more than once.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import tensorflow.compat.v1 as tf
import tf_slim as slim
__all__ = ['create_clones',
'deploy',
'optimize_clones',
'DeployedModel',
'DeploymentConfig',
'Clone',
]
# Namedtuple used to represent a clone during deployment.
Clone = collections.namedtuple('Clone',
['outputs', # Whatever model_fn() returned.
'scope', # The scope used to create it.
'device', # The device used to create.
])
# Namedtuple used to represent a DeployedModel, returned by deploy().
DeployedModel = collections.namedtuple('DeployedModel',
['train_op', # The `train_op`
'summary_op', # The `summary_op`
'total_loss', # The loss `Tensor`
'clones', # A list of `Clones` tuples.
])
# Default parameters for DeploymentConfig
_deployment_params = {'num_clones': 1,
'clone_on_cpu': False,
'replica_id': 0,
'num_replicas': 1,
'num_ps_tasks': 0,
'worker_job_name': 'worker',
'ps_job_name': 'ps'}
def create_clones(config, model_fn, args=None, kwargs=None):
"""Creates multiple clones according to config using a `model_fn`.
The returned values of `model_fn(*args, **kwargs)` are collected along with
the scope and device used to created it in a namedtuple
`Clone(outputs, scope, device)`
Note: it is assumed that any loss created by `model_fn` is collected at
the tf.GraphKeys.LOSSES collection.
To recover the losses, summaries or update_ops created by the clone use:
```python
losses = tf.get_collection(tf.GraphKeys.LOSSES, clone.scope)
summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, clone.scope)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, clone.scope)
```
The deployment options are specified by the config object and support
deploying one or several clones on different GPUs and one or several replicas
of such clones.
The argument `model_fn` is called `config.num_clones` times to create the
model clones as `model_fn(*args, **kwargs)`.
If `config` specifies deployment on multiple replicas then the default
  tensorflow device is set appropriately for each call to `model_fn` and for the
slim variable creation functions: model and global variables will be created
on the `ps` device, the clone operations will be on the `worker` device.
Args:
config: A DeploymentConfig object.
model_fn: A callable. Called as `model_fn(*args, **kwargs)`
args: Optional list of arguments to pass to `model_fn`.
kwargs: Optional list of keyword arguments to pass to `model_fn`.
Returns:
A list of namedtuples `Clone`.
"""
clones = []
args = args or []
kwargs = kwargs or {}
with slim.arg_scope([slim.model_variable, slim.variable],
device=config.variables_device()):
# Create clones.
for i in range(0, config.num_clones):
with tf.name_scope(config.clone_scope(i)) as clone_scope:
clone_device = config.clone_device(i)
with tf.device(clone_device):
with tf.variable_scope(tf.get_variable_scope(),
reuse=True if i > 0 else None):
outputs = model_fn(*args, **kwargs)
clones.append(Clone(outputs, clone_scope, clone_device))
return clones
def _gather_clone_loss(clone, num_clones, regularization_losses):
"""Gather the loss for a single clone.
Args:
clone: A Clone namedtuple.
num_clones: The number of clones being deployed.
regularization_losses: Possibly empty list of regularization_losses
to add to the clone losses.
Returns:
A tensor for the total loss for the clone. Can be None.
"""
# The return value.
sum_loss = None
# Individual components of the loss that will need summaries.
clone_loss = None
regularization_loss = None
# Compute and aggregate losses on the clone device.
with tf.device(clone.device):
all_losses = []
clone_losses = tf.get_collection(tf.GraphKeys.LOSSES, clone.scope)
if clone_losses:
clone_loss = tf.add_n(clone_losses, name='clone_loss')
if num_clones > 1:
clone_loss = tf.div(clone_loss, 1.0 * num_clones,
name='scaled_clone_loss')
all_losses.append(clone_loss)
if regularization_losses:
regularization_loss = tf.add_n(regularization_losses,
name='regularization_loss')
all_losses.append(regularization_loss)
if all_losses:
sum_loss = tf.add_n(all_losses)
# Add the summaries out of the clone device block.
if clone_loss is not None:
tf.summary.scalar('/'.join(filter(None,
['Losses', clone.scope, 'clone_loss'])),
clone_loss)
if regularization_loss is not None:
tf.summary.scalar('Losses/regularization_loss', regularization_loss)
return sum_loss
def _optimize_clone(optimizer, clone, num_clones, regularization_losses,
**kwargs):
"""Compute losses and gradients for a single clone.
Args:
optimizer: A tf.Optimizer object.
clone: A Clone namedtuple.
num_clones: The number of clones being deployed.
regularization_losses: Possibly empty list of regularization_losses
to add to the clone losses.
**kwargs: Dict of kwarg to pass to compute_gradients().
Returns:
A tuple (clone_loss, clone_grads_and_vars).
- clone_loss: A tensor for the total loss for the clone. Can be None.
- clone_grads_and_vars: List of (gradient, variable) for the clone.
Can be empty.
"""
sum_loss = _gather_clone_loss(clone, num_clones, regularization_losses)
clone_grad = None
if sum_loss is not None:
with tf.device(clone.device):
clone_grad = optimizer.compute_gradients(sum_loss, **kwargs)
return sum_loss, clone_grad
def optimize_clones(clones, optimizer,
regularization_losses=None,
**kwargs):
"""Compute clone losses and gradients for the given list of `Clones`.
Note: The regularization_losses are added to the first clone losses.
Args:
clones: List of `Clones` created by `create_clones()`.
optimizer: An `Optimizer` object.
regularization_losses: Optional list of regularization losses. If None it
will gather them from tf.GraphKeys.REGULARIZATION_LOSSES. Pass `[]` to
exclude them.
**kwargs: Optional list of keyword arguments to pass to `compute_gradients`.
Returns:
A tuple (total_loss, grads_and_vars).
- total_loss: A Tensor containing the average of the clone losses including
the regularization loss.
- grads_and_vars: A List of tuples (gradient, variable) containing the sum
of the gradients for each variable.
"""
grads_and_vars = []
clones_losses = []
num_clones = len(clones)
if regularization_losses is None:
regularization_losses = tf.get_collection(
tf.GraphKeys.REGULARIZATION_LOSSES)
for clone in clones:
with tf.name_scope(clone.scope):
clone_loss, clone_grad = _optimize_clone(
optimizer, clone, num_clones, regularization_losses, **kwargs)
if clone_loss is not None:
clones_losses.append(clone_loss)
grads_and_vars.append(clone_grad)
# Only use regularization_losses for the first clone
regularization_losses = None
# Compute the total_loss summing all the clones_losses.
total_loss = tf.add_n(clones_losses, name='total_loss')
# Sum the gradients across clones.
grads_and_vars = _sum_clones_gradients(grads_and_vars)
return total_loss, grads_and_vars
def deploy(config,
model_fn,
args=None,
kwargs=None,
optimizer=None,
summarize_gradients=False):
"""Deploys a Slim-constructed model across multiple clones.
The deployment options are specified by the config object and support
deploying one or several clones on different GPUs and one or several replicas
of such clones.
The argument `model_fn` is called `config.num_clones` times to create the
model clones as `model_fn(*args, **kwargs)`.
The optional argument `optimizer` is an `Optimizer` object. If not `None`,
the deployed model is configured for training with that optimizer.
If `config` specifies deployment on multiple replicas then the default
  tensorflow device is set appropriately for each call to `model_fn` and for the
slim variable creation functions: model and global variables will be created
on the `ps` device, the clone operations will be on the `worker` device.
Args:
config: A `DeploymentConfig` object.
model_fn: A callable. Called as `model_fn(*args, **kwargs)`
args: Optional list of arguments to pass to `model_fn`.
kwargs: Optional list of keyword arguments to pass to `model_fn`.
optimizer: Optional `Optimizer` object. If passed the model is deployed
for training with that optimizer.
    summarize_gradients: Whether or not to add summaries to the gradients.
Returns:
A `DeployedModel` namedtuple.
"""
# Gather initial summaries.
summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))
# Create Clones.
clones = create_clones(config, model_fn, args, kwargs)
first_clone = clones[0]
# Gather update_ops from the first clone. These contain, for example,
# the updates for the batch_norm variables created by model_fn.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, first_clone.scope)
train_op = None
total_loss = None
with tf.device(config.optimizer_device()):
if optimizer:
# Place the global step on the device storing the variables.
with tf.device(config.variables_device()):
global_step = slim.get_or_create_global_step()
# Compute the gradients for the clones.
total_loss, clones_gradients = optimize_clones(clones, optimizer)
if clones_gradients:
if summarize_gradients:
# Add summaries to the gradients.
summaries |= set(_add_gradients_summaries(clones_gradients))
# Create gradient updates.
grad_updates = optimizer.apply_gradients(clones_gradients,
global_step=global_step)
update_ops.append(grad_updates)
update_op = tf.group(*update_ops)
with tf.control_dependencies([update_op]):
train_op = tf.identity(total_loss, name='train_op')
else:
clones_losses = []
regularization_losses = tf.get_collection(
tf.GraphKeys.REGULARIZATION_LOSSES)
for clone in clones:
with tf.name_scope(clone.scope):
clone_loss = _gather_clone_loss(clone, len(clones),
regularization_losses)
if clone_loss is not None:
clones_losses.append(clone_loss)
# Only use regularization_losses for the first clone
regularization_losses = None
if clones_losses:
total_loss = tf.add_n(clones_losses, name='total_loss')
# Add the summaries from the first clone. These contain the summaries
# created by model_fn and either optimize_clones() or _gather_clone_loss().
summaries |= set(tf.get_collection(tf.GraphKeys.SUMMARIES,
first_clone.scope))
if total_loss is not None:
# Add total_loss to summary.
summaries.add(tf.summary.scalar('total_loss', total_loss))
if summaries:
# Merge all summaries together.
summary_op = tf.summary.merge(list(summaries), name='summary_op')
else:
summary_op = None
return DeployedModel(train_op, summary_op, total_loss, clones)
def _sum_clones_gradients(clone_grads):
"""Calculate the sum gradient for each shared variable across all clones.
This function assumes that the clone_grads has been scaled appropriately by
1 / num_clones.
Args:
clone_grads: A List of List of tuples (gradient, variable), one list per
`Clone`.
Returns:
List of tuples of (gradient, variable) where the gradient has been summed
across all clones.
"""
sum_grads = []
for grad_and_vars in zip(*clone_grads):
# Note that each grad_and_vars looks like the following:
# ((grad_var0_clone0, var0), ... (grad_varN_cloneN, varN))
grads = []
var = grad_and_vars[0][1]
for g, v in grad_and_vars:
assert v == var
if g is not None:
grads.append(g)
if grads:
if len(grads) > 1:
sum_grad = tf.add_n(grads, name=var.op.name + '/sum_grads')
else:
sum_grad = grads[0]
sum_grads.append((sum_grad, var))
return sum_grads
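# --- Illustrative sketch (not part of the original module) -------------------
# What _sum_clones_gradients does to its input, shown with plain Python numbers
# instead of tensors: gradients for the same variable are summed across clones.
# The variable names and gradient values below are hypothetical.
def _example_sum_clone_gradients():
  """Sums per-clone (gradient, variable_name) pairs variable by variable."""
  clone_grads = [
      [(0.1, 'weights'), (0.2, 'biases')],  # gradients from clone 0
      [(0.3, 'weights'), (0.4, 'biases')],  # gradients from clone 1
  ]
  summed = []
  for grad_and_vars in zip(*clone_grads):
    var = grad_and_vars[0][1]
    summed.append((sum(g for g, _ in grad_and_vars), var))
  return summed  # approximately [(0.4, 'weights'), (0.6, 'biases')]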
def _add_gradients_summaries(grads_and_vars):
"""Add histogram summaries to gradients.
Note: The summaries are also added to the SUMMARIES collection.
Args:
grads_and_vars: A list of gradient to variable pairs (tuples).
Returns:
The _list_ of the added summaries for grads_and_vars.
"""
summaries = []
for grad, var in grads_and_vars:
if grad is not None:
if isinstance(grad, tf.IndexedSlices):
grad_values = grad.values
else:
grad_values = grad
summaries.append(tf.summary.histogram(var.op.name + ':gradient',
grad_values))
summaries.append(tf.summary.histogram(var.op.name + ':gradient_norm',
tf.global_norm([grad_values])))
else:
tf.logging.info('Var %s has no gradient', var.op.name)
return summaries
class DeploymentConfig(object):
"""Configuration for deploying a model with `deploy()`.
You can pass an instance of this class to `deploy()` to specify exactly
how to deploy the model to build. If you do not pass one, an instance built
from the default deployment_hparams will be used.
"""
def __init__(self,
num_clones=1,
clone_on_cpu=False,
replica_id=0,
num_replicas=1,
num_ps_tasks=0,
worker_job_name='worker',
ps_job_name='ps'):
"""Create a DeploymentConfig.
The config describes how to deploy a model across multiple clones and
replicas. The model will be replicated `num_clones` times in each replica.
    If `clone_on_cpu` is True, each clone will be placed on CPU.
If `num_replicas` is 1, the model is deployed via a single process. In that
case `worker_device`, `num_ps_tasks`, and `ps_device` are ignored.
If `num_replicas` is greater than 1, then `worker_device` and `ps_device`
must specify TensorFlow devices for the `worker` and `ps` jobs and
`num_ps_tasks` must be positive.
Args:
num_clones: Number of model clones to deploy in each replica.
clone_on_cpu: If True clones would be placed on CPU.
replica_id: Integer. Index of the replica for which the model is
deployed. Usually 0 for the chief replica.
num_replicas: Number of replicas to use.
num_ps_tasks: Number of tasks for the `ps` job. 0 to not use replicas.
worker_job_name: A name for the worker job.
ps_job_name: A name for the parameter server job.
Raises:
ValueError: If the arguments are invalid.
"""
if num_replicas > 1:
if num_ps_tasks < 1:
raise ValueError('When using replicas num_ps_tasks must be positive')
if num_replicas > 1 or num_ps_tasks > 0:
if not worker_job_name:
raise ValueError('Must specify worker_job_name when using replicas')
if not ps_job_name:
raise ValueError('Must specify ps_job_name when using parameter server')
if replica_id >= num_replicas:
raise ValueError('replica_id must be less than num_replicas')
self._num_clones = num_clones
self._clone_on_cpu = clone_on_cpu
self._replica_id = replica_id
self._num_replicas = num_replicas
self._num_ps_tasks = num_ps_tasks
self._ps_device = '/job:' + ps_job_name if num_ps_tasks > 0 else ''
self._worker_device = '/job:' + worker_job_name if num_ps_tasks > 0 else ''
@property
def num_clones(self):
return self._num_clones
@property
def clone_on_cpu(self):
return self._clone_on_cpu
@property
def replica_id(self):
return self._replica_id
@property
def num_replicas(self):
return self._num_replicas
@property
def num_ps_tasks(self):
return self._num_ps_tasks
@property
def ps_device(self):
return self._ps_device
@property
def worker_device(self):
return self._worker_device
def caching_device(self):
"""Returns the device to use for caching variables.
Variables are cached on the worker CPU when using replicas.
Returns:
A device string or None if the variables do not need to be cached.
"""
if self._num_ps_tasks > 0:
return lambda op: op.device
else:
return None
def clone_device(self, clone_index):
"""Device used to create the clone and all the ops inside the clone.
Args:
clone_index: Int, representing the clone_index.
Returns:
A value suitable for `tf.device()`.
Raises:
      ValueError: if `clone_index` is greater than or equal to the number of
        clones.
"""
if clone_index >= self._num_clones:
raise ValueError('clone_index must be less than num_clones')
device = ''
if self._num_ps_tasks > 0:
device += self._worker_device
if self._clone_on_cpu:
device += '/device:CPU:0'
else:
device += '/device:GPU:%d' % clone_index
return device
def clone_scope(self, clone_index):
"""Name scope to create the clone.
Args:
clone_index: Int, representing the clone_index.
Returns:
A name_scope suitable for `tf.name_scope()`.
Raises:
      ValueError: if `clone_index` is greater than or equal to the number of
        clones.
"""
if clone_index >= self._num_clones:
raise ValueError('clone_index must be less than num_clones')
scope = ''
if self._num_clones > 1:
scope = 'clone_%d' % clone_index
return scope
def optimizer_device(self):
"""Device to use with the optimizer.
Returns:
A value suitable for `tf.device()`.
"""
if self._num_ps_tasks > 0 or self._num_clones > 0:
return self._worker_device + '/device:CPU:0'
else:
return ''
def inputs_device(self):
"""Device to use to build the inputs.
Returns:
A value suitable for `tf.device()`.
"""
device = ''
if self._num_ps_tasks > 0:
device += self._worker_device
device += '/device:CPU:0'
return device
def variables_device(self):
"""Returns the device to use for variables created inside the clone.
Returns:
A value suitable for `tf.device()`.
"""
device = ''
if self._num_ps_tasks > 0:
device += self._ps_device
device += '/device:CPU:0'
class _PSDeviceChooser(object):
"""Slim device chooser for variables when using PS."""
def __init__(self, device, tasks):
self._device = device
self._tasks = tasks
self._task = 0
def choose(self, op):
if op.device:
return op.device
node_def = op if isinstance(op, tf.NodeDef) else op.node_def
if node_def.op.startswith('Variable'):
t = self._task
self._task = (self._task + 1) % self._tasks
d = '%s/task:%d' % (self._device, t)
return d
else:
return op.device
if not self._num_ps_tasks:
return device
else:
chooser = _PSDeviceChooser(device, self._num_ps_tasks)
return chooser.choose
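# --- Illustrative usage (not part of the original module) --------------------
# A minimal sketch of how DeploymentConfig maps clones to devices; it only
# inspects device and scope strings and does not build a graph. The two-GPU
# configuration below is hypothetical.
def _example_deployment_devices():
  """Returns clone (device, scope) pairs for a hypothetical 2-GPU config."""
  config = DeploymentConfig(num_clones=2)
  # Expected: [('/device:GPU:0', 'clone_0'), ('/device:GPU:1', 'clone_1')]
  return [(config.clone_device(i), config.clone_scope(i)) for i in range(2)]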
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/slim/deployment/model_deploy.py | model_deploy.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for model_deploy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
import tf_slim as slim
from deployment import model_deploy
class DeploymentConfigTest(tf.test.TestCase):
def testDefaults(self):
deploy_config = model_deploy.DeploymentConfig()
self.assertEqual(slim.get_variables(), [])
self.assertEqual(deploy_config.caching_device(), None)
self.assertDeviceEqual(deploy_config.clone_device(0), 'GPU:0')
self.assertEqual(deploy_config.clone_scope(0), '')
self.assertDeviceEqual(deploy_config.optimizer_device(), 'CPU:0')
self.assertDeviceEqual(deploy_config.inputs_device(), 'CPU:0')
self.assertDeviceEqual(deploy_config.variables_device(), 'CPU:0')
def testCPUonly(self):
deploy_config = model_deploy.DeploymentConfig(clone_on_cpu=True)
self.assertEqual(deploy_config.caching_device(), None)
self.assertDeviceEqual(deploy_config.clone_device(0), 'CPU:0')
self.assertEqual(deploy_config.clone_scope(0), '')
self.assertDeviceEqual(deploy_config.optimizer_device(), 'CPU:0')
self.assertDeviceEqual(deploy_config.inputs_device(), 'CPU:0')
self.assertDeviceEqual(deploy_config.variables_device(), 'CPU:0')
def testMultiGPU(self):
deploy_config = model_deploy.DeploymentConfig(num_clones=2)
self.assertEqual(deploy_config.caching_device(), None)
self.assertDeviceEqual(deploy_config.clone_device(0), 'GPU:0')
self.assertDeviceEqual(deploy_config.clone_device(1), 'GPU:1')
self.assertEqual(deploy_config.clone_scope(0), 'clone_0')
self.assertEqual(deploy_config.clone_scope(1), 'clone_1')
self.assertDeviceEqual(deploy_config.optimizer_device(), 'CPU:0')
self.assertDeviceEqual(deploy_config.inputs_device(), 'CPU:0')
self.assertDeviceEqual(deploy_config.variables_device(), 'CPU:0')
def testPS(self):
deploy_config = model_deploy.DeploymentConfig(num_clones=1, num_ps_tasks=1)
self.assertDeviceEqual(deploy_config.clone_device(0),
'/job:worker/device:GPU:0')
self.assertEqual(deploy_config.clone_scope(0), '')
self.assertDeviceEqual(deploy_config.optimizer_device(),
'/job:worker/device:CPU:0')
self.assertDeviceEqual(deploy_config.inputs_device(),
'/job:worker/device:CPU:0')
with tf.device(deploy_config.variables_device()):
a = tf.Variable(0)
b = tf.Variable(0)
c = tf.no_op()
d = slim.variable('a', [],
caching_device=deploy_config.caching_device())
self.assertDeviceEqual(a.device, '/job:ps/task:0/device:CPU:0')
self.assertDeviceEqual(a.device, a.value().device)
self.assertDeviceEqual(b.device, '/job:ps/task:0/device:CPU:0')
self.assertDeviceEqual(b.device, b.value().device)
self.assertDeviceEqual(c.device, '')
self.assertDeviceEqual(d.device, '/job:ps/task:0/device:CPU:0')
self.assertDeviceEqual(d.value().device, '')
def testMultiGPUPS(self):
deploy_config = model_deploy.DeploymentConfig(num_clones=2, num_ps_tasks=1)
self.assertEqual(deploy_config.caching_device()(tf.no_op()), '')
self.assertDeviceEqual(deploy_config.clone_device(0),
'/job:worker/device:GPU:0')
self.assertDeviceEqual(deploy_config.clone_device(1),
'/job:worker/device:GPU:1')
self.assertEqual(deploy_config.clone_scope(0), 'clone_0')
self.assertEqual(deploy_config.clone_scope(1), 'clone_1')
self.assertDeviceEqual(deploy_config.optimizer_device(),
'/job:worker/device:CPU:0')
self.assertDeviceEqual(deploy_config.inputs_device(),
'/job:worker/device:CPU:0')
def testReplicasPS(self):
deploy_config = model_deploy.DeploymentConfig(num_replicas=2,
num_ps_tasks=2)
self.assertDeviceEqual(deploy_config.clone_device(0),
'/job:worker/device:GPU:0')
self.assertEqual(deploy_config.clone_scope(0), '')
self.assertDeviceEqual(deploy_config.optimizer_device(),
'/job:worker/device:CPU:0')
self.assertDeviceEqual(deploy_config.inputs_device(),
'/job:worker/device:CPU:0')
def testReplicasMultiGPUPS(self):
deploy_config = model_deploy.DeploymentConfig(num_replicas=2,
num_clones=2,
num_ps_tasks=2)
self.assertDeviceEqual(deploy_config.clone_device(0),
'/job:worker/device:GPU:0')
self.assertDeviceEqual(deploy_config.clone_device(1),
'/job:worker/device:GPU:1')
self.assertEqual(deploy_config.clone_scope(0), 'clone_0')
self.assertEqual(deploy_config.clone_scope(1), 'clone_1')
self.assertDeviceEqual(deploy_config.optimizer_device(),
'/job:worker/device:CPU:0')
self.assertDeviceEqual(deploy_config.inputs_device(),
'/job:worker/device:CPU:0')
def testVariablesPS(self):
deploy_config = model_deploy.DeploymentConfig(num_ps_tasks=2)
with tf.device(deploy_config.variables_device()):
a = tf.Variable(0)
b = tf.Variable(0)
c = tf.no_op()
d = slim.variable('a', [],
caching_device=deploy_config.caching_device())
self.assertDeviceEqual(a.device, '/job:ps/task:0/device:CPU:0')
self.assertDeviceEqual(a.device, a.value().device)
self.assertDeviceEqual(b.device, '/job:ps/task:1/device:CPU:0')
self.assertDeviceEqual(b.device, b.value().device)
self.assertDeviceEqual(c.device, '')
self.assertDeviceEqual(d.device, '/job:ps/task:0/device:CPU:0')
self.assertDeviceEqual(d.value().device, '')
def LogisticClassifier(inputs, labels, scope=None, reuse=None):
with tf.variable_scope(scope, 'LogisticClassifier', [inputs, labels],
reuse=reuse):
predictions = slim.fully_connected(inputs, 1, activation_fn=tf.sigmoid,
scope='fully_connected')
slim.losses.log_loss(predictions, labels)
return predictions
def BatchNormClassifier(inputs, labels, scope=None, reuse=None):
with tf.variable_scope(scope, 'BatchNormClassifier', [inputs, labels],
reuse=reuse):
inputs = slim.batch_norm(inputs, decay=0.1, fused=True)
predictions = slim.fully_connected(inputs, 1,
activation_fn=tf.sigmoid,
scope='fully_connected')
slim.losses.log_loss(predictions, labels)
return predictions
class CreatecloneTest(tf.test.TestCase):
def setUp(self):
# Create an easy training set:
np.random.seed(0)
self._inputs = np.zeros((16, 4))
self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
self._logdir = self.get_temp_dir()
for i in range(16):
j = int(2 * self._labels[i] + np.random.randint(0, 2))
self._inputs[i, j] = 1
def testCreateLogisticClassifier(self):
g = tf.Graph()
with g.as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
model_fn = LogisticClassifier
clone_args = (tf_inputs, tf_labels)
deploy_config = model_deploy.DeploymentConfig(num_clones=1)
self.assertEqual(slim.get_variables(), [])
clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
clone = clones[0]
self.assertEqual(len(slim.get_variables()), 2)
for v in slim.get_variables():
self.assertDeviceEqual(v.device, 'CPU:0')
self.assertDeviceEqual(v.value().device, 'CPU:0')
self.assertEqual(clone.outputs.op.name,
'LogisticClassifier/fully_connected/Sigmoid')
self.assertEqual(clone.scope, '')
self.assertDeviceEqual(clone.device, 'GPU:0')
self.assertEqual(len(slim.losses.get_losses()), 1)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
self.assertEqual(update_ops, [])
def testCreateSingleclone(self):
g = tf.Graph()
with g.as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
model_fn = BatchNormClassifier
clone_args = (tf_inputs, tf_labels)
deploy_config = model_deploy.DeploymentConfig(num_clones=1)
self.assertEqual(slim.get_variables(), [])
clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
clone = clones[0]
self.assertEqual(len(slim.get_variables()), 5)
for v in slim.get_variables():
self.assertDeviceEqual(v.device, 'CPU:0')
self.assertDeviceEqual(v.value().device, 'CPU:0')
self.assertEqual(clone.outputs.op.name,
'BatchNormClassifier/fully_connected/Sigmoid')
self.assertEqual(clone.scope, '')
self.assertDeviceEqual(clone.device, 'GPU:0')
self.assertEqual(len(slim.losses.get_losses()), 1)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
self.assertEqual(len(update_ops), 2)
def testCreateMulticlone(self):
g = tf.Graph()
with g.as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
model_fn = BatchNormClassifier
clone_args = (tf_inputs, tf_labels)
num_clones = 4
deploy_config = model_deploy.DeploymentConfig(num_clones=num_clones)
self.assertEqual(slim.get_variables(), [])
clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
self.assertEqual(len(slim.get_variables()), 5)
for v in slim.get_variables():
self.assertDeviceEqual(v.device, 'CPU:0')
self.assertDeviceEqual(v.value().device, 'CPU:0')
self.assertEqual(len(clones), num_clones)
for i, clone in enumerate(clones):
self.assertEqual(
clone.outputs.op.name,
'clone_%d/BatchNormClassifier/fully_connected/Sigmoid' % i)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, clone.scope)
self.assertEqual(len(update_ops), 2)
self.assertEqual(clone.scope, 'clone_%d/' % i)
self.assertDeviceEqual(clone.device, 'GPU:%d' % i)
def testCreateOnecloneWithPS(self):
g = tf.Graph()
with g.as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
model_fn = BatchNormClassifier
clone_args = (tf_inputs, tf_labels)
deploy_config = model_deploy.DeploymentConfig(num_clones=1,
num_ps_tasks=1)
self.assertEqual(slim.get_variables(), [])
clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
self.assertEqual(len(clones), 1)
clone = clones[0]
self.assertEqual(clone.outputs.op.name,
'BatchNormClassifier/fully_connected/Sigmoid')
self.assertDeviceEqual(clone.device, '/job:worker/device:GPU:0')
self.assertEqual(clone.scope, '')
self.assertEqual(len(slim.get_variables()), 5)
for v in slim.get_variables():
self.assertDeviceEqual(v.device, '/job:ps/task:0/CPU:0')
self.assertDeviceEqual(v.device, v.value().device)
def testCreateMulticloneWithPS(self):
g = tf.Graph()
with g.as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
model_fn = BatchNormClassifier
clone_args = (tf_inputs, tf_labels)
deploy_config = model_deploy.DeploymentConfig(num_clones=2,
num_ps_tasks=2)
self.assertEqual(slim.get_variables(), [])
clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
self.assertEqual(len(slim.get_variables()), 5)
for i, v in enumerate(slim.get_variables()):
t = i % 2
self.assertDeviceEqual(v.device, '/job:ps/task:%d/device:CPU:0' % t)
self.assertDeviceEqual(v.device, v.value().device)
self.assertEqual(len(clones), 2)
for i, clone in enumerate(clones):
self.assertEqual(
clone.outputs.op.name,
'clone_%d/BatchNormClassifier/fully_connected/Sigmoid' % i)
self.assertEqual(clone.scope, 'clone_%d/' % i)
self.assertDeviceEqual(clone.device, '/job:worker/device:GPU:%d' % i)
class OptimizeclonesTest(tf.test.TestCase):
def setUp(self):
# Create an easy training set:
np.random.seed(0)
self._inputs = np.zeros((16, 4))
self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
self._logdir = self.get_temp_dir()
for i in range(16):
j = int(2 * self._labels[i] + np.random.randint(0, 2))
self._inputs[i, j] = 1
def testCreateLogisticClassifier(self):
g = tf.Graph()
with g.as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
model_fn = LogisticClassifier
clone_args = (tf_inputs, tf_labels)
deploy_config = model_deploy.DeploymentConfig(num_clones=1)
self.assertEqual(slim.get_variables(), [])
clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
self.assertEqual(len(slim.get_variables()), 2)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
self.assertEqual(update_ops, [])
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
optimizer)
self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
self.assertEqual(total_loss.op.name, 'total_loss')
for g, v in grads_and_vars:
self.assertDeviceEqual(g.device, 'GPU:0')
self.assertDeviceEqual(v.device, 'CPU:0')
def testCreateSingleclone(self):
g = tf.Graph()
with g.as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
model_fn = BatchNormClassifier
clone_args = (tf_inputs, tf_labels)
deploy_config = model_deploy.DeploymentConfig(num_clones=1)
self.assertEqual(slim.get_variables(), [])
clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
self.assertEqual(len(slim.get_variables()), 5)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
self.assertEqual(len(update_ops), 2)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
optimizer)
self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
self.assertEqual(total_loss.op.name, 'total_loss')
for g, v in grads_and_vars:
self.assertDeviceEqual(g.device, 'GPU:0')
self.assertDeviceEqual(v.device, 'CPU:0')
def testCreateMulticlone(self):
g = tf.Graph()
with g.as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
model_fn = BatchNormClassifier
clone_args = (tf_inputs, tf_labels)
num_clones = 4
deploy_config = model_deploy.DeploymentConfig(num_clones=num_clones)
self.assertEqual(slim.get_variables(), [])
clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
self.assertEqual(len(slim.get_variables()), 5)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
self.assertEqual(len(update_ops), num_clones * 2)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
optimizer)
self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
self.assertEqual(total_loss.op.name, 'total_loss')
for g, v in grads_and_vars:
self.assertDeviceEqual(g.device, '')
self.assertDeviceEqual(v.device, 'CPU:0')
def testCreateMulticloneCPU(self):
g = tf.Graph()
with g.as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
model_fn = BatchNormClassifier
model_args = (tf_inputs, tf_labels)
num_clones = 4
deploy_config = model_deploy.DeploymentConfig(num_clones=num_clones,
clone_on_cpu=True)
self.assertEqual(slim.get_variables(), [])
clones = model_deploy.create_clones(deploy_config, model_fn, model_args)
self.assertEqual(len(slim.get_variables()), 5)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
self.assertEqual(len(update_ops), num_clones * 2)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
optimizer)
self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
self.assertEqual(total_loss.op.name, 'total_loss')
for g, v in grads_and_vars:
self.assertDeviceEqual(g.device, '')
self.assertDeviceEqual(v.device, 'CPU:0')
def testCreateOnecloneWithPS(self):
g = tf.Graph()
with g.as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
model_fn = BatchNormClassifier
model_args = (tf_inputs, tf_labels)
deploy_config = model_deploy.DeploymentConfig(num_clones=1,
num_ps_tasks=1)
self.assertEqual(slim.get_variables(), [])
clones = model_deploy.create_clones(deploy_config, model_fn, model_args)
self.assertEqual(len(slim.get_variables()), 5)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
self.assertEqual(len(update_ops), 2)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
optimizer)
self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
self.assertEqual(total_loss.op.name, 'total_loss')
for g, v in grads_and_vars:
self.assertDeviceEqual(g.device, '/job:worker/device:GPU:0')
self.assertDeviceEqual(v.device, '/job:ps/task:0/CPU:0')
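# Illustrative sketch (not part of the original test suite) of the deploy()
# entry point verified by DeployTest below; the clone count and the optimizer
# are arbitrary assumptions.
def _ExampleDeploy(model_fn, model_args):
  config = model_deploy.DeploymentConfig(num_clones=2, clone_on_cpu=True)
  optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
  # deploy() creates the clones, sums their losses into model.total_loss and
  # returns a single model.train_op which, as testLocalTrainOp checks, also
  # drives the batch-norm update ops.
  model = model_deploy.deploy(config, model_fn, model_args,
                              optimizer=optimizer)
  return model.train_op, model.total_loss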
class DeployTest(tf.test.TestCase):
def setUp(self):
# Create an easy training set:
np.random.seed(0)
self._inputs = np.zeros((16, 4))
self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
self._logdir = self.get_temp_dir()
for i in range(16):
j = int(2 * self._labels[i] + np.random.randint(0, 2))
self._inputs[i, j] = 1
def _addBesselsCorrection(self, sample_size, expected_var):
correction_factor = sample_size / (sample_size - 1)
expected_var *= correction_factor
return expected_var
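  # Worked example of the correction above (illustrative, using the numbers
  # from testLocalTrainOp): with sample_size=16 the correction factor is
  # 16 / 15 ~= 1.0667, so a biased variance of 0.1875 becomes
  # 0.1875 * 16 / 15 = 0.2 after correction.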
def testLocalTrainOp(self):
g = tf.Graph()
with g.as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
model_fn = BatchNormClassifier
model_args = (tf_inputs, tf_labels)
deploy_config = model_deploy.DeploymentConfig(num_clones=2,
clone_on_cpu=True)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
self.assertEqual(slim.get_variables(), [])
model = model_deploy.deploy(deploy_config, model_fn, model_args,
optimizer=optimizer)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
self.assertEqual(len(update_ops), 4)
self.assertEqual(len(model.clones), 2)
self.assertEqual(model.total_loss.op.name, 'total_loss')
self.assertEqual(model.summary_op.op.name, 'summary_op/summary_op')
self.assertEqual(model.train_op.op.name, 'train_op')
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
moving_mean = slim.get_variables_by_name('moving_mean')[0]
moving_variance = slim.get_variables_by_name('moving_variance')[0]
initial_loss = sess.run(model.total_loss)
initial_mean, initial_variance = sess.run([moving_mean,
moving_variance])
self.assertAllClose(initial_mean, [0.0, 0.0, 0.0, 0.0])
self.assertAllClose(initial_variance, [1.0, 1.0, 1.0, 1.0])
for _ in range(10):
sess.run(model.train_op)
final_loss = sess.run(model.total_loss)
self.assertLess(final_loss, initial_loss / 5.0)
final_mean, final_variance = sess.run([moving_mean,
moving_variance])
expected_mean = np.array([0.125, 0.25, 0.375, 0.25])
expected_var = np.array([0.109375, 0.1875, 0.234375, 0.1875])
expected_var = self._addBesselsCorrection(16, expected_var)
self.assertAllClose(final_mean, expected_mean)
self.assertAllClose(final_variance, expected_var)
def testNoSummariesOnGPU(self):
with tf.Graph().as_default():
deploy_config = model_deploy.DeploymentConfig(num_clones=2)
# clone function creates a fully_connected layer with a regularizer loss.
def ModelFn():
inputs = tf.constant(1.0, shape=(10, 20), dtype=tf.float32)
reg = slim.l2_regularizer(0.001)
slim.fully_connected(inputs, 30, weights_regularizer=reg)
model = model_deploy.deploy(
deploy_config, ModelFn,
optimizer=tf.train.GradientDescentOptimizer(1.0))
# The model summary op should have a few summary inputs and all of them
# should be on the CPU.
self.assertTrue(model.summary_op.op.inputs)
for inp in model.summary_op.op.inputs:
self.assertEqual('/device:CPU:0', inp.device)
def testNoSummariesOnGPUForEvals(self):
with tf.Graph().as_default():
deploy_config = model_deploy.DeploymentConfig(num_clones=2)
# clone function creates a fully_connected layer with a regularizer loss.
def ModelFn():
inputs = tf.constant(1.0, shape=(10, 20), dtype=tf.float32)
reg = slim.l2_regularizer(0.001)
slim.fully_connected(inputs, 30, weights_regularizer=reg)
# No optimizer here, it's an eval.
model = model_deploy.deploy(deploy_config, ModelFn)
# The model summary op should have a few summary inputs and all of them
# should be on the CPU.
self.assertTrue(model.summary_op.op.inputs)
for inp in model.summary_op.op.inputs:
self.assertEqual('/device:CPU:0', inp.device)
if __name__ == '__main__':
tf.test.main()
# ==== End of file: slim/deployment/model_deploy_test.py (123-object-detection) ====
# ==== File: slim/deployment/__init__.py (123-object-detection) is empty ====
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Creates and runs `Estimator` for object detection model on TPUs.
This uses the TPUEstimator API to define and run a model in TRAIN/EVAL modes.
"""
# pylint: enable=line-too-long
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
import tensorflow.compat.v1 as tf
from object_detection import model_lib
tf.flags.DEFINE_bool('use_tpu', True, 'Use TPUs rather than plain CPUs')
# Cloud TPU Cluster Resolvers
flags.DEFINE_string(
'gcp_project',
default=None,
help='Project name for the Cloud TPU-enabled project. If not specified, we '
'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string(
'tpu_zone',
default=None,
    help='GCE zone where the Cloud TPU is located. If not specified, we '
    'will attempt to automatically detect the GCE zone from metadata.')
flags.DEFINE_string(
'tpu_name',
default=None,
help='Name of the Cloud TPU for Cluster Resolvers.')
flags.DEFINE_integer('num_shards', 8, 'Number of shards (TPU cores).')
flags.DEFINE_integer('iterations_per_loop', 100,
'Number of iterations per TPU training loop.')
# For mode=train_and_eval, evaluation occurs after training is finished.
# Note: independently of steps_per_checkpoint, estimator will save the most
# recent checkpoint every 10 minutes by default for train_and_eval
flags.DEFINE_string('mode', 'train',
'Mode to run: train, eval')
flags.DEFINE_integer('train_batch_size', None, 'Batch size for training. If '
'this is not provided, batch size is read from training '
'config.')
flags.DEFINE_integer('num_train_steps', None, 'Number of train steps.')
flags.DEFINE_boolean('eval_training_data', False,
'If training data should be evaluated for this job.')
flags.DEFINE_integer('sample_1_of_n_eval_examples', 1, 'Will sample one of '
'every n eval input examples, where n is provided.')
flags.DEFINE_integer('sample_1_of_n_eval_on_train_examples', 5, 'Will sample '
'one of every n train input examples for evaluation, '
'where n is provided. This is only used if '
'`eval_training_data` is True.')
flags.DEFINE_string(
'model_dir', None, 'Path to output model directory '
'where event and checkpoint files will be written.')
flags.DEFINE_string('pipeline_config_path', None, 'Path to pipeline config '
'file.')
flags.DEFINE_integer(
'max_eval_retries', 0, 'If running continuous eval, the maximum number of '
'retries upon encountering tf.errors.InvalidArgumentError. If negative, '
'will always retry the evaluation.'
)
FLAGS = tf.flags.FLAGS
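# Illustrative invocation (the TPU name and GCS paths below are placeholders,
# not values referenced anywhere in this file):
#
#   python model_tpu_main.py \
#     --tpu_name=my-tpu \
#     --model_dir=gs://my-bucket/model_dir \
#     --pipeline_config_path=gs://my-bucket/pipeline.config \
#     --mode=train \
#     --num_train_steps=50000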
def main(unused_argv):
flags.mark_flag_as_required('model_dir')
flags.mark_flag_as_required('pipeline_config_path')
tpu_cluster_resolver = (
tf.distribute.cluster_resolver.TPUClusterResolver(
tpu=[FLAGS.tpu_name], zone=FLAGS.tpu_zone, project=FLAGS.gcp_project))
tpu_grpc_url = tpu_cluster_resolver.get_master()
config = tf.estimator.tpu.RunConfig(
master=tpu_grpc_url,
evaluation_master=tpu_grpc_url,
model_dir=FLAGS.model_dir,
tpu_config=tf.estimator.tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_shards))
kwargs = {}
if FLAGS.train_batch_size:
kwargs['batch_size'] = FLAGS.train_batch_size
train_and_eval_dict = model_lib.create_estimator_and_inputs(
run_config=config,
pipeline_config_path=FLAGS.pipeline_config_path,
train_steps=FLAGS.num_train_steps,
sample_1_of_n_eval_examples=FLAGS.sample_1_of_n_eval_examples,
sample_1_of_n_eval_on_train_examples=(
FLAGS.sample_1_of_n_eval_on_train_examples),
use_tpu_estimator=True,
use_tpu=FLAGS.use_tpu,
num_shards=FLAGS.num_shards,
save_final_config=FLAGS.mode == 'train',
**kwargs)
estimator = train_and_eval_dict['estimator']
train_input_fn = train_and_eval_dict['train_input_fn']
eval_input_fns = train_and_eval_dict['eval_input_fns']
eval_on_train_input_fn = train_and_eval_dict['eval_on_train_input_fn']
train_steps = train_and_eval_dict['train_steps']
if FLAGS.mode == 'train':
estimator.train(input_fn=train_input_fn, max_steps=train_steps)
# Continuously evaluating.
if FLAGS.mode == 'eval':
if FLAGS.eval_training_data:
name = 'training_data'
input_fn = eval_on_train_input_fn
else:
name = 'validation_data'
# Currently only a single eval input is allowed.
input_fn = eval_input_fns[0]
model_lib.continuous_eval(estimator, FLAGS.model_dir, input_fn, train_steps,
name, FLAGS.max_eval_retries)
if __name__ == '__main__':
tf.app.run()
# ==== End of file: object_detection/model_tpu_main.py (123-object-detection) ====
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common utility functions for evaluation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import re
import time
import numpy as np
from six.moves import range
import tensorflow.compat.v1 as tf
import tf_slim as slim
from object_detection.core import box_list
from object_detection.core import box_list_ops
from object_detection.core import keypoint_ops
from object_detection.core import standard_fields as fields
from object_detection.metrics import coco_evaluation
from object_detection.metrics import lvis_evaluation
from object_detection.protos import eval_pb2
from object_detection.utils import label_map_util
from object_detection.utils import object_detection_evaluation
from object_detection.utils import ops
from object_detection.utils import shape_utils
from object_detection.utils import visualization_utils as vis_utils
EVAL_KEYPOINT_METRIC = 'coco_keypoint_metrics'
# A dictionary of metric names to classes that implement the metric. The
# classes in the dictionary must implement the
# utils.object_detection_evaluation.DetectionEvaluator interface.
EVAL_METRICS_CLASS_DICT = {
'coco_detection_metrics':
coco_evaluation.CocoDetectionEvaluator,
'coco_keypoint_metrics':
coco_evaluation.CocoKeypointEvaluator,
'coco_mask_metrics':
coco_evaluation.CocoMaskEvaluator,
'coco_panoptic_metrics':
coco_evaluation.CocoPanopticSegmentationEvaluator,
'lvis_mask_metrics':
lvis_evaluation.LVISMaskEvaluator,
'oid_challenge_detection_metrics':
object_detection_evaluation.OpenImagesDetectionChallengeEvaluator,
'oid_challenge_segmentation_metrics':
object_detection_evaluation
.OpenImagesInstanceSegmentationChallengeEvaluator,
'pascal_voc_detection_metrics':
object_detection_evaluation.PascalDetectionEvaluator,
'weighted_pascal_voc_detection_metrics':
object_detection_evaluation.WeightedPascalDetectionEvaluator,
'precision_at_recall_detection_metrics':
object_detection_evaluation.PrecisionAtRecallDetectionEvaluator,
'pascal_voc_instance_segmentation_metrics':
object_detection_evaluation.PascalInstanceSegmentationEvaluator,
'weighted_pascal_voc_instance_segmentation_metrics':
object_detection_evaluation.WeightedPascalInstanceSegmentationEvaluator,
'oid_V2_detection_metrics':
object_detection_evaluation.OpenImagesDetectionEvaluator,
}
EVAL_DEFAULT_METRIC = 'coco_detection_metrics'
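# Illustrative sketch (not part of the original module): entries in
# eval_config.metrics_set are resolved against EVAL_METRICS_CLASS_DICT, so a
# manual lookup for the default metric would look like the function below.
# Real callers should prefer get_evaluators() further down, which also threads
# through per-metric keyword arguments; evaluators other than the default COCO
# detection one may expect different constructor arguments.
def _example_lookup_default_evaluator(categories):
  evaluator_cls = EVAL_METRICS_CLASS_DICT[EVAL_DEFAULT_METRIC]
  return evaluator_cls(categories=categories)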
def write_metrics(metrics, global_step, summary_dir):
"""Write metrics to a summary directory.
Args:
metrics: A dictionary containing metric names and values.
global_step: Global step at which the metrics are computed.
summary_dir: Directory to write tensorflow summaries to.
"""
tf.logging.info('Writing metrics to tf summary.')
summary_writer = tf.summary.FileWriterCache.get(summary_dir)
for key in sorted(metrics):
summary = tf.Summary(value=[
tf.Summary.Value(tag=key, simple_value=metrics[key]),
])
summary_writer.add_summary(summary, global_step)
tf.logging.info('%s: %f', key, metrics[key])
tf.logging.info('Metrics written to tf summary.')
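# Minimal sketch of driving write_metrics; the metric names, step and summary
# directory are illustrative placeholders rather than values used elsewhere in
# this module.
def _example_write_metrics(summary_dir='/tmp/eval_summaries'):
  metrics = {'DetectionBoxes_Precision/mAP': 0.42, 'Losses/total_loss': 1.3}
  write_metrics(metrics, global_step=1000, summary_dir=summary_dir)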
# TODO(rathodv): Add tests.
def visualize_detection_results(result_dict,
tag,
global_step,
categories,
summary_dir='',
export_dir='',
agnostic_mode=False,
show_groundtruth=False,
groundtruth_box_visualization_color='black',
min_score_thresh=.5,
max_num_predictions=20,
skip_scores=False,
skip_labels=False,
keep_image_id_for_visualization_export=False):
"""Visualizes detection results and writes visualizations to image summaries.
This function visualizes an image with its detected bounding boxes and writes
to image summaries which can be viewed on tensorboard. It optionally also
writes images to a directory. In the case of missing entry in the label map,
unknown class name in the visualization is shown as "N/A".
Args:
result_dict: a dictionary holding groundtruth and detection
data corresponding to each image being evaluated. The following keys
are required:
'original_image': a numpy array representing the image with shape
[1, height, width, 3] or [1, height, width, 1]
'detection_boxes': a numpy array of shape [N, 4]
'detection_scores': a numpy array of shape [N]
'detection_classes': a numpy array of shape [N]
The following keys are optional:
'groundtruth_boxes': a numpy array of shape [N, 4]
'groundtruth_keypoints': a numpy array of shape [N, num_keypoints, 2]
    Detections are assumed to be provided in decreasing order of score, and
    for display we assume that scores are probabilities between 0 and 1.
tag: tensorboard tag (string) to associate with image.
global_step: global step at which the visualization are generated.
categories: a list of dictionaries representing all possible categories.
Each dict in this list has the following keys:
'id': (required) an integer id uniquely identifying this category
'name': (required) string representing category name
e.g., 'cat', 'dog', 'pizza'
'supercategory': (optional) string representing the supercategory
e.g., 'animal', 'vehicle', 'food', etc
summary_dir: the output directory to which the image summaries are written.
export_dir: the output directory to which images are written. If this is
empty (default), then images are not exported.
agnostic_mode: boolean (default: False) controlling whether to evaluate in
class-agnostic mode or not.
show_groundtruth: boolean (default: False) controlling whether to show
groundtruth boxes in addition to detected boxes
groundtruth_box_visualization_color: box color for visualizing groundtruth
boxes
min_score_thresh: minimum score threshold for a box to be visualized
max_num_predictions: maximum number of detections to visualize
skip_scores: whether to skip score when drawing a single detection
skip_labels: whether to skip label when drawing a single detection
keep_image_id_for_visualization_export: whether to keep image identifier in
filename when exported to export_dir
Raises:
ValueError: if result_dict does not contain the expected keys (i.e.,
'original_image', 'detection_boxes', 'detection_scores',
'detection_classes')
"""
detection_fields = fields.DetectionResultFields
input_fields = fields.InputDataFields
if not set([
input_fields.original_image,
detection_fields.detection_boxes,
detection_fields.detection_scores,
detection_fields.detection_classes,
]).issubset(set(result_dict.keys())):
raise ValueError('result_dict does not contain all expected keys.')
if show_groundtruth and input_fields.groundtruth_boxes not in result_dict:
raise ValueError('If show_groundtruth is enabled, result_dict must contain '
'groundtruth_boxes.')
tf.logging.info('Creating detection visualizations.')
category_index = label_map_util.create_category_index(categories)
image = np.squeeze(result_dict[input_fields.original_image], axis=0)
if image.shape[2] == 1: # If one channel image, repeat in RGB.
image = np.tile(image, [1, 1, 3])
detection_boxes = result_dict[detection_fields.detection_boxes]
detection_scores = result_dict[detection_fields.detection_scores]
detection_classes = np.int32((result_dict[
detection_fields.detection_classes]))
detection_keypoints = result_dict.get(detection_fields.detection_keypoints)
detection_masks = result_dict.get(detection_fields.detection_masks)
detection_boundaries = result_dict.get(detection_fields.detection_boundaries)
# Plot groundtruth underneath detections
if show_groundtruth:
groundtruth_boxes = result_dict[input_fields.groundtruth_boxes]
groundtruth_keypoints = result_dict.get(input_fields.groundtruth_keypoints)
vis_utils.visualize_boxes_and_labels_on_image_array(
image=image,
boxes=groundtruth_boxes,
classes=None,
scores=None,
category_index=category_index,
keypoints=groundtruth_keypoints,
use_normalized_coordinates=False,
max_boxes_to_draw=None,
groundtruth_box_visualization_color=groundtruth_box_visualization_color)
vis_utils.visualize_boxes_and_labels_on_image_array(
image,
detection_boxes,
detection_classes,
detection_scores,
category_index,
instance_masks=detection_masks,
instance_boundaries=detection_boundaries,
keypoints=detection_keypoints,
use_normalized_coordinates=False,
max_boxes_to_draw=max_num_predictions,
min_score_thresh=min_score_thresh,
agnostic_mode=agnostic_mode,
skip_scores=skip_scores,
skip_labels=skip_labels)
if export_dir:
    if (keep_image_id_for_visualization_export and
        result_dict[fields.InputDataFields().key]):
export_path = os.path.join(export_dir, 'export-{}-{}.png'.format(
tag, result_dict[fields.InputDataFields().key]))
else:
export_path = os.path.join(export_dir, 'export-{}.png'.format(tag))
vis_utils.save_image_array_as_png(image, export_path)
summary = tf.Summary(value=[
tf.Summary.Value(
tag=tag,
image=tf.Summary.Image(
encoded_image_string=vis_utils.encode_image_array_as_png_str(
image)))
])
summary_writer = tf.summary.FileWriterCache.get(summary_dir)
summary_writer.add_summary(summary, global_step)
tf.logging.info('Detection visualizations written to summary with tag %s.',
tag)
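# Minimal sketch of the inputs visualize_detection_results expects; the image
# size, box, score, category and summary directory are illustrative
# placeholders (the box is given in absolute pixel coordinates, matching the
# use_normalized_coordinates=False call above).
def _example_visualize_detection_results(summary_dir='/tmp/eval_vis'):
  result_dict = {
      fields.InputDataFields.original_image:
          np.zeros((1, 100, 100, 3), dtype=np.uint8),
      fields.DetectionResultFields.detection_boxes:
          np.array([[10., 10., 50., 50.]], dtype=np.float32),
      fields.DetectionResultFields.detection_scores:
          np.array([0.9], dtype=np.float32),
      fields.DetectionResultFields.detection_classes:
          np.array([1], dtype=np.int64),
  }
  categories = [{'id': 1, 'name': 'cat'}]
  visualize_detection_results(result_dict, tag='image-0', global_step=0,
                              categories=categories, summary_dir=summary_dir)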
def _run_checkpoint_once(tensor_dict,
evaluators=None,
batch_processor=None,
checkpoint_dirs=None,
variables_to_restore=None,
restore_fn=None,
num_batches=1,
master='',
save_graph=False,
save_graph_dir='',
losses_dict=None,
eval_export_path=None,
process_metrics_fn=None):
"""Evaluates metrics defined in evaluators and returns summaries.
This function loads the latest checkpoint in checkpoint_dirs and evaluates
all metrics defined in evaluators. The metrics are processed in batch by the
batch_processor.
Args:
tensor_dict: a dictionary holding tensors representing a batch of detections
and corresponding groundtruth annotations.
    evaluators: a list of objects of type DetectionEvaluator to be used for
evaluation. Note that the metric names produced by different evaluators
must be unique.
batch_processor: a function taking four arguments:
1. tensor_dict: the same tensor_dict that is passed in as the first
argument to this function.
2. sess: a tensorflow session
      3. batch_index: an integer representing the index of the batch amongst
        all batches
      4. counters: a dictionary holding 'success' and 'skipped' counts
      By default, batch_processor is None, which defaults to running:
return sess.run(tensor_dict)
To skip an image, it suffices to return an empty dictionary in place of
result_dict.
    checkpoint_dirs: list of directories to load into an EnsembleModel. If it
      has only one directory, EnsembleModel will not be used -- a
      DetectionModel will be instantiated directly. Not used if restore_fn is
      set.
variables_to_restore: None, or a dictionary mapping variable names found in
a checkpoint to model variables. The dictionary would normally be
generated by creating a tf.train.ExponentialMovingAverage object and
calling its variables_to_restore() method. Not used if restore_fn is set.
restore_fn: None, or a function that takes a tf.Session object and correctly
restores all necessary variables from the correct checkpoint file. If
None, attempts to restore from the first directory in checkpoint_dirs.
num_batches: the number of batches to use for evaluation.
master: the location of the Tensorflow session.
save_graph: whether or not the Tensorflow graph is stored as a pbtxt file.
save_graph_dir: where to store the Tensorflow graph on disk. If save_graph
is True this must be non-empty.
losses_dict: optional dictionary of scalar detection losses.
eval_export_path: Path for saving a json file that contains the detection
results in json format.
process_metrics_fn: a callback called with evaluation results after each
evaluation is done. It could be used e.g. to back up checkpoints with
best evaluation scores, or to call an external system to update evaluation
results in order to drive best hyper-parameter search. Parameters are:
int checkpoint_number, Dict[str, ObjectDetectionEvalMetrics] metrics,
str checkpoint_file path.
Returns:
global_step: the count of global steps.
all_evaluator_metrics: A dictionary containing metric names and values.
Raises:
ValueError: if restore_fn is None and checkpoint_dirs doesn't have at least
one element.
ValueError: if save_graph is True and save_graph_dir is not defined.
"""
if save_graph and not save_graph_dir:
raise ValueError('`save_graph_dir` must be defined.')
sess = tf.Session(master, graph=tf.get_default_graph())
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
sess.run(tf.tables_initializer())
checkpoint_file = None
if restore_fn:
restore_fn(sess)
else:
if not checkpoint_dirs:
raise ValueError('`checkpoint_dirs` must have at least one entry.')
checkpoint_file = tf.train.latest_checkpoint(checkpoint_dirs[0])
saver = tf.train.Saver(variables_to_restore)
saver.restore(sess, checkpoint_file)
if save_graph:
tf.train.write_graph(sess.graph_def, save_graph_dir, 'eval.pbtxt')
counters = {'skipped': 0, 'success': 0}
aggregate_result_losses_dict = collections.defaultdict(list)
with slim.queues.QueueRunners(sess):
try:
for batch in range(int(num_batches)):
if (batch + 1) % 100 == 0:
tf.logging.info('Running eval ops batch %d/%d', batch + 1,
num_batches)
if not batch_processor:
try:
if not losses_dict:
losses_dict = {}
result_dict, result_losses_dict = sess.run([tensor_dict,
losses_dict])
counters['success'] += 1
except tf.errors.InvalidArgumentError:
tf.logging.info('Skipping image')
counters['skipped'] += 1
result_dict = {}
else:
result_dict, result_losses_dict = batch_processor(
tensor_dict, sess, batch, counters, losses_dict=losses_dict)
if not result_dict:
continue
for key, value in iter(result_losses_dict.items()):
aggregate_result_losses_dict[key].append(value)
for evaluator in evaluators:
# TODO(b/65130867): Use image_id tensor once we fix the input data
# decoders to return correct image_id.
# TODO(akuznetsa): result_dict contains batches of images, while
# add_single_ground_truth_image_info expects a single image. Fix
if (isinstance(result_dict, dict) and
fields.InputDataFields.key in result_dict and
result_dict[fields.InputDataFields.key]):
image_id = result_dict[fields.InputDataFields.key]
else:
image_id = batch
evaluator.add_single_ground_truth_image_info(
image_id=image_id, groundtruth_dict=result_dict)
evaluator.add_single_detected_image_info(
image_id=image_id, detections_dict=result_dict)
tf.logging.info('Running eval batches done.')
except tf.errors.OutOfRangeError:
tf.logging.info('Done evaluating -- epoch limit reached')
finally:
# When done, ask the threads to stop.
tf.logging.info('# success: %d', counters['success'])
tf.logging.info('# skipped: %d', counters['skipped'])
all_evaluator_metrics = {}
  if eval_export_path:
for evaluator in evaluators:
if (isinstance(evaluator, coco_evaluation.CocoDetectionEvaluator) or
isinstance(evaluator, coco_evaluation.CocoMaskEvaluator)):
tf.logging.info('Started dumping to json file.')
evaluator.dump_detections_to_json_file(
json_output_path=eval_export_path)
tf.logging.info('Finished dumping to json file.')
for evaluator in evaluators:
metrics = evaluator.evaluate()
evaluator.clear()
if any(key in all_evaluator_metrics for key in metrics):
raise ValueError('Metric names between evaluators must not collide.')
all_evaluator_metrics.update(metrics)
global_step = tf.train.global_step(sess, tf.train.get_global_step())
for key, value in iter(aggregate_result_losses_dict.items()):
all_evaluator_metrics['Losses/' + key] = np.mean(value)
if process_metrics_fn and checkpoint_file:
m = re.search(r'model.ckpt-(\d+)$', checkpoint_file)
if not m:
tf.logging.error('Failed to parse checkpoint number from: %s',
checkpoint_file)
else:
checkpoint_number = int(m.group(1))
process_metrics_fn(checkpoint_number, all_evaluator_metrics,
checkpoint_file)
sess.close()
return (global_step, all_evaluator_metrics)
# TODO(rathodv): Add tests.
def repeated_checkpoint_run(tensor_dict,
summary_dir,
evaluators,
batch_processor=None,
checkpoint_dirs=None,
variables_to_restore=None,
restore_fn=None,
num_batches=1,
eval_interval_secs=120,
max_number_of_evaluations=None,
max_evaluation_global_step=None,
master='',
save_graph=False,
save_graph_dir='',
losses_dict=None,
eval_export_path=None,
process_metrics_fn=None):
"""Periodically evaluates desired tensors using checkpoint_dirs or restore_fn.
This function repeatedly loads a checkpoint and evaluates a desired
set of tensors (provided by tensor_dict) and hands the resulting numpy
arrays to a function result_processor which can be used to further
process/save/visualize the results.
Args:
tensor_dict: a dictionary holding tensors representing a batch of detections
and corresponding groundtruth annotations.
summary_dir: a directory to write metrics summaries.
    evaluators: a list of objects of type DetectionEvaluator to be used for
evaluation. Note that the metric names produced by different evaluators
must be unique.
    batch_processor: a function taking four arguments:
      1. tensor_dict: the same tensor_dict that is passed in as the first
        argument to this function.
      2. sess: a tensorflow session
      3. batch_index: an integer representing the index of the batch amongst
        all batches
      4. counters: a dictionary holding 'success' and 'skipped' counts
      By default, batch_processor is None, which defaults to running:
return sess.run(tensor_dict)
checkpoint_dirs: list of directories to load into a DetectionModel or an
EnsembleModel if restore_fn isn't set. Also used to determine when to run
next evaluation. Must have at least one element.
variables_to_restore: None, or a dictionary mapping variable names found in
a checkpoint to model variables. The dictionary would normally be
generated by creating a tf.train.ExponentialMovingAverage object and
calling its variables_to_restore() method. Not used if restore_fn is set.
restore_fn: a function that takes a tf.Session object and correctly restores
all necessary variables from the correct checkpoint file.
num_batches: the number of batches to use for evaluation.
eval_interval_secs: the number of seconds between each evaluation run.
max_number_of_evaluations: the max number of iterations of the evaluation.
If the value is left as None the evaluation continues indefinitely.
max_evaluation_global_step: global step when evaluation stops.
master: the location of the Tensorflow session.
save_graph: whether or not the Tensorflow graph is saved as a pbtxt file.
    save_graph_dir: where to save the Tensorflow graph on disk. If save_graph
      is True this must be non-empty.
losses_dict: optional dictionary of scalar detection losses.
eval_export_path: Path for saving a json file that contains the detection
results in json format.
process_metrics_fn: a callback called with evaluation results after each
evaluation is done. It could be used e.g. to back up checkpoints with
best evaluation scores, or to call an external system to update evaluation
results in order to drive best hyper-parameter search. Parameters are:
int checkpoint_number, Dict[str, ObjectDetectionEvalMetrics] metrics,
str checkpoint_file path.
Returns:
metrics: A dictionary containing metric names and values in the latest
evaluation.
Raises:
    ValueError: if max_number_of_evaluations is neither None nor a positive
      number.
ValueError: if checkpoint_dirs doesn't have at least one element.
"""
if max_number_of_evaluations and max_number_of_evaluations <= 0:
raise ValueError(
'`max_number_of_evaluations` must be either None or a positive number.')
if max_evaluation_global_step and max_evaluation_global_step <= 0:
raise ValueError(
'`max_evaluation_global_step` must be either None or positive.')
if not checkpoint_dirs:
raise ValueError('`checkpoint_dirs` must have at least one entry.')
last_evaluated_model_path = None
number_of_evaluations = 0
while True:
start = time.time()
tf.logging.info('Starting evaluation at ' + time.strftime(
'%Y-%m-%d-%H:%M:%S', time.gmtime()))
model_path = tf.train.latest_checkpoint(checkpoint_dirs[0])
if not model_path:
tf.logging.info('No model found in %s. Will try again in %d seconds',
checkpoint_dirs[0], eval_interval_secs)
elif model_path == last_evaluated_model_path:
tf.logging.info('Found already evaluated checkpoint. Will try again in '
'%d seconds', eval_interval_secs)
else:
last_evaluated_model_path = model_path
global_step, metrics = _run_checkpoint_once(
tensor_dict,
evaluators,
batch_processor,
checkpoint_dirs,
variables_to_restore,
restore_fn,
num_batches,
master,
save_graph,
save_graph_dir,
losses_dict=losses_dict,
eval_export_path=eval_export_path,
process_metrics_fn=process_metrics_fn)
write_metrics(metrics, global_step, summary_dir)
if (max_evaluation_global_step and
global_step >= max_evaluation_global_step):
tf.logging.info('Finished evaluation!')
break
number_of_evaluations += 1
if (max_number_of_evaluations and
number_of_evaluations >= max_number_of_evaluations):
tf.logging.info('Finished evaluation!')
break
time_to_next_eval = start + eval_interval_secs - time.time()
if time_to_next_eval > 0:
time.sleep(time_to_next_eval)
return metrics
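# Illustrative sketch (not part of the original module) of driving the
# evaluation loop above; the directories and the single-evaluator choice are
# assumptions made for the example, and tensor_dict is expected to come from
# the caller's input pipeline.
def _example_repeated_checkpoint_run(tensor_dict, categories,
                                     checkpoint_dir='/tmp/train',
                                     summary_dir='/tmp/eval'):
  evaluators = [coco_evaluation.CocoDetectionEvaluator(categories)]
  return repeated_checkpoint_run(
      tensor_dict,
      summary_dir=summary_dir,
      evaluators=evaluators,
      checkpoint_dirs=[checkpoint_dir],
      num_batches=100,
      max_number_of_evaluations=1)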
def _scale_box_to_absolute(args):
boxes, image_shape = args
return box_list_ops.to_absolute_coordinates(
box_list.BoxList(boxes), image_shape[0], image_shape[1]).get()
def _resize_detection_masks(arg_tuple):
"""Resizes detection masks.
Args:
arg_tuple: A (detection_boxes, detection_masks, image_shape, pad_shape)
tuple where
detection_boxes is a tf.float32 tensor of size [num_masks, 4] containing
the box corners. Row i contains [ymin, xmin, ymax, xmax] of the box
corresponding to mask i. Note that the box corners are in
normalized coordinates.
detection_masks is a tensor of size
[num_masks, mask_height, mask_width].
image_shape is a tensor of shape [2]
pad_shape is a tensor of shape [2] --- this is assumed to be greater
than or equal to image_shape along both dimensions and represents a
shape to-be-padded-to.
  Returns:
    A uint8 tensor of shape [num_masks, pad_shape[0], pad_shape[1]] with the
    masks reframed onto the full (padded) image canvas; float masks are
    binarized at 0.5 before the cast.
  """
detection_boxes, detection_masks, image_shape, pad_shape = arg_tuple
detection_masks_reframed = ops.reframe_box_masks_to_image_masks(
detection_masks, detection_boxes, image_shape[0], image_shape[1])
pad_instance_dim = tf.zeros([3, 1], dtype=tf.int32)
pad_hw_dim = tf.concat([tf.zeros([1], dtype=tf.int32),
pad_shape - image_shape], axis=0)
pad_hw_dim = tf.expand_dims(pad_hw_dim, 1)
paddings = tf.concat([pad_instance_dim, pad_hw_dim], axis=1)
detection_masks_reframed = tf.pad(detection_masks_reframed, paddings)
# If the masks are currently float, binarize them. Otherwise keep them as
# integers, since they have already been thresholded.
if detection_masks_reframed.dtype == tf.float32:
detection_masks_reframed = tf.greater(detection_masks_reframed, 0.5)
return tf.cast(detection_masks_reframed, tf.uint8)
def resize_detection_masks(detection_boxes, detection_masks,
original_image_spatial_shapes):
"""Resizes per-box detection masks to be relative to the entire image.
Note that this function only works when the spatial size of all images in
the batch is the same. If not, this function should be used with batch_size=1.
Args:
detection_boxes: A [batch_size, num_instances, 4] float tensor containing
bounding boxes.
detection_masks: A [batch_size, num_instances, height, width] float tensor
containing binary instance masks per box.
    original_image_spatial_shapes: a [batch_size, 2] shaped int tensor
holding the spatial dimensions of each image in the batch.
Returns:
masks: Masks resized to the spatial extents given by
(original_image_spatial_shapes[0, 0], original_image_spatial_shapes[0, 1])
"""
  # Pad every mask to the maximum spatial shape in the batch; the evaluator
  # can recover the per-image sizes from the original_image_spatial_shape
  # field added to the eval dict.
max_spatial_shape = tf.reduce_max(
original_image_spatial_shapes, axis=0, keep_dims=True)
tiled_max_spatial_shape = tf.tile(
max_spatial_shape,
multiples=[tf.shape(original_image_spatial_shapes)[0], 1])
return shape_utils.static_or_dynamic_map_fn(
_resize_detection_masks,
elems=[detection_boxes,
detection_masks,
original_image_spatial_shapes,
tiled_max_spatial_shape],
dtype=tf.uint8)
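# Shape-level sketch of resize_detection_masks (the sizes below are arbitrary
# placeholders): a single 28x28 per-box mask is reframed onto a 100x200 image
# canvas, yielding a [1, 1, 100, 200] uint8 tensor.
def _example_resize_detection_masks():
  detection_boxes = tf.constant([[[0.1, 0.1, 0.5, 0.5]]], dtype=tf.float32)
  detection_masks = tf.ones([1, 1, 28, 28], dtype=tf.float32)
  original_image_spatial_shapes = tf.constant([[100, 200]], dtype=tf.int32)
  return resize_detection_masks(detection_boxes, detection_masks,
                                original_image_spatial_shapes)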
def _resize_groundtruth_masks(args):
"""Resizes groundtruth masks to the original image size."""
mask, true_image_shape, original_image_shape, pad_shape = args
true_height = true_image_shape[0]
true_width = true_image_shape[1]
mask = mask[:, :true_height, :true_width]
mask = tf.expand_dims(mask, 3)
mask = tf.image.resize_images(
mask,
original_image_shape,
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
align_corners=True)
paddings = tf.concat(
[tf.zeros([3, 1], dtype=tf.int32),
tf.expand_dims(
tf.concat([tf.zeros([1], dtype=tf.int32),
pad_shape-original_image_shape], axis=0),
1)], axis=1)
mask = tf.pad(tf.squeeze(mask, 3), paddings)
return tf.cast(mask, tf.uint8)
def _resize_surface_coordinate_masks(args):
detection_boxes, surface_coords, image_shape = args
surface_coords_v, surface_coords_u = tf.unstack(surface_coords, axis=-1)
surface_coords_v_reframed = ops.reframe_box_masks_to_image_masks(
surface_coords_v, detection_boxes, image_shape[0], image_shape[1])
surface_coords_u_reframed = ops.reframe_box_masks_to_image_masks(
surface_coords_u, detection_boxes, image_shape[0], image_shape[1])
return tf.stack([surface_coords_v_reframed, surface_coords_u_reframed],
axis=-1)
def _scale_keypoint_to_absolute(args):
keypoints, image_shape = args
return keypoint_ops.scale(keypoints, image_shape[0], image_shape[1])
def result_dict_for_single_example(image,
key,
detections,
groundtruth=None,
class_agnostic=False,
scale_to_absolute=False):
"""Merges all detection and groundtruth information for a single example.
Note that evaluation tools require classes that are 1-indexed, and so this
function performs the offset. If `class_agnostic` is True, all output classes
have label 1.
Args:
image: A single 4D uint8 image tensor of shape [1, H, W, C].
key: A single string tensor identifying the image.
detections: A dictionary of detections, returned from
DetectionModel.postprocess().
groundtruth: (Optional) Dictionary of groundtruth items, with fields:
'groundtruth_boxes': [num_boxes, 4] float32 tensor of boxes, in
normalized coordinates.
'groundtruth_classes': [num_boxes] int64 tensor of 1-indexed classes.
'groundtruth_area': [num_boxes] float32 tensor of bbox area. (Optional)
'groundtruth_is_crowd': [num_boxes] int64 tensor. (Optional)
'groundtruth_difficult': [num_boxes] int64 tensor. (Optional)
'groundtruth_group_of': [num_boxes] int64 tensor. (Optional)
'groundtruth_instance_masks': 3D int64 tensor of instance masks
(Optional).
'groundtruth_keypoints': [num_boxes, num_keypoints, 2] float32 tensor with
keypoints (Optional).
class_agnostic: Boolean indicating whether the detections are class-agnostic
(i.e. binary). Default False.
scale_to_absolute: Boolean indicating whether boxes and keypoints should be
scaled to absolute coordinates. Note that for IoU based evaluations, it
does not matter whether boxes are expressed in absolute or relative
coordinates. Default False.
Returns:
A dictionary with:
'original_image': A [1, H, W, C] uint8 image tensor.
'key': A string tensor with image identifier.
'detection_boxes': [max_detections, 4] float32 tensor of boxes, in
normalized or absolute coordinates, depending on the value of
`scale_to_absolute`.
'detection_scores': [max_detections] float32 tensor of scores.
'detection_classes': [max_detections] int64 tensor of 1-indexed classes.
'detection_masks': [max_detections, H, W] float32 tensor of binarized
masks, reframed to full image masks.
'groundtruth_boxes': [num_boxes, 4] float32 tensor of boxes, in
normalized or absolute coordinates, depending on the value of
`scale_to_absolute`. (Optional)
'groundtruth_classes': [num_boxes] int64 tensor of 1-indexed classes.
(Optional)
'groundtruth_area': [num_boxes] float32 tensor of bbox area. (Optional)
'groundtruth_is_crowd': [num_boxes] int64 tensor. (Optional)
'groundtruth_difficult': [num_boxes] int64 tensor. (Optional)
'groundtruth_group_of': [num_boxes] int64 tensor. (Optional)
'groundtruth_instance_masks': 3D int64 tensor of instance masks
(Optional).
'groundtruth_keypoints': [num_boxes, num_keypoints, 2] float32 tensor with
keypoints (Optional).
"""
  max_gt_boxes = None
  if groundtruth:
max_gt_boxes = tf.shape(
groundtruth[fields.InputDataFields.groundtruth_boxes])[0]
for gt_key in groundtruth:
# expand groundtruth dict along the batch dimension.
groundtruth[gt_key] = tf.expand_dims(groundtruth[gt_key], 0)
for detection_key in detections:
detections[detection_key] = tf.expand_dims(
detections[detection_key][0], axis=0)
batched_output_dict = result_dict_for_batched_example(
image,
tf.expand_dims(key, 0),
detections,
groundtruth,
class_agnostic,
scale_to_absolute,
max_gt_boxes=max_gt_boxes)
exclude_keys = [
fields.InputDataFields.original_image,
fields.DetectionResultFields.num_detections,
fields.InputDataFields.num_groundtruth_boxes
]
output_dict = {
fields.InputDataFields.original_image:
batched_output_dict[fields.InputDataFields.original_image]
}
for key in batched_output_dict:
# remove the batch dimension.
if key not in exclude_keys:
output_dict[key] = tf.squeeze(batched_output_dict[key], 0)
return output_dict
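# Minimal sketch of calling result_dict_for_single_example; the image size and
# the detection/groundtruth tensors below are placeholders standing in for the
# output of DetectionModel.postprocess() and the input pipeline.
def _example_result_dict_for_single_example():
  image = tf.zeros([1, 100, 100, 3], dtype=tf.uint8)
  key = tf.constant('image-0')
  detections = {
      fields.DetectionResultFields.detection_boxes:
          tf.constant([[[0.1, 0.1, 0.5, 0.5]]], dtype=tf.float32),
      fields.DetectionResultFields.detection_scores:
          tf.constant([[0.9]], dtype=tf.float32),
      fields.DetectionResultFields.detection_classes:
          tf.constant([[0.]], dtype=tf.float32),
      fields.DetectionResultFields.num_detections:
          tf.constant([1.], dtype=tf.float32),
  }
  groundtruth = {
      fields.InputDataFields.groundtruth_boxes:
          tf.constant([[0.1, 0.1, 0.5, 0.5]], dtype=tf.float32),
      fields.InputDataFields.groundtruth_classes:
          tf.constant([1], dtype=tf.int64),
  }
  # Boxes come back in absolute pixel coordinates because of
  # scale_to_absolute=True; detection classes are shifted to be 1-indexed.
  return result_dict_for_single_example(image, key, detections,
                                        groundtruth=groundtruth,
                                        scale_to_absolute=True)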
def result_dict_for_batched_example(images,
keys,
detections,
groundtruth=None,
class_agnostic=False,
scale_to_absolute=False,
original_image_spatial_shapes=None,
true_image_shapes=None,
max_gt_boxes=None,
label_id_offset=1):
"""Merges all detection and groundtruth information for a single example.
Note that evaluation tools require classes that are 1-indexed, and so this
function performs the offset. If `class_agnostic` is True, all output classes
have label 1.
The groundtruth coordinates of boxes/keypoints in 'groundtruth' dictionary are
normalized relative to the (potentially padded) input image, while the
coordinates in 'detection' dictionary are normalized relative to the true
image shape.
Args:
images: A single 4D uint8 image tensor of shape [batch_size, H, W, C].
keys: A [batch_size] string/int tensor with image identifier.
detections: A dictionary of detections, returned from
DetectionModel.postprocess().
groundtruth: (Optional) Dictionary of groundtruth items, with fields:
'groundtruth_boxes': [batch_size, max_number_of_boxes, 4] float32 tensor
of boxes, in normalized coordinates.
'groundtruth_classes': [batch_size, max_number_of_boxes] int64 tensor of
1-indexed classes.
'groundtruth_area': [batch_size, max_number_of_boxes] float32 tensor of
bbox area. (Optional)
'groundtruth_is_crowd':[batch_size, max_number_of_boxes] int64
tensor. (Optional)
'groundtruth_difficult': [batch_size, max_number_of_boxes] int64
tensor. (Optional)
'groundtruth_group_of': [batch_size, max_number_of_boxes] int64
tensor. (Optional)
'groundtruth_instance_masks': 4D int64 tensor of instance
masks (Optional).
'groundtruth_keypoints': [batch_size, max_number_of_boxes, num_keypoints,
2] float32 tensor with keypoints (Optional).
'groundtruth_keypoint_visibilities': [batch_size, max_number_of_boxes,
num_keypoints] bool tensor with keypoint visibilities (Optional).
'groundtruth_labeled_classes': [batch_size, num_classes] int64
tensor of 1-indexed classes. (Optional)
'groundtruth_dp_num_points': [batch_size, max_number_of_boxes] int32
tensor. (Optional)
'groundtruth_dp_part_ids': [batch_size, max_number_of_boxes,
max_sampled_points] int32 tensor. (Optional)
'groundtruth_dp_surface_coords_list': [batch_size, max_number_of_boxes,
max_sampled_points, 4] float32 tensor. (Optional)
class_agnostic: Boolean indicating whether the detections are class-agnostic
(i.e. binary). Default False.
scale_to_absolute: Boolean indicating whether boxes and keypoints should be
scaled to absolute coordinates. Note that for IoU based evaluations, it
does not matter whether boxes are expressed in absolute or relative
coordinates. Default False.
original_image_spatial_shapes: A 2D int32 tensor of shape [batch_size, 2]
used to resize the image. When set to None, the image size is retained.
true_image_shapes: A 2D int32 tensor of shape [batch_size, 3]
containing the size of the unpadded original_image.
max_gt_boxes: [batch_size] tensor representing the maximum number of
groundtruth boxes to pad.
label_id_offset: offset for class ids.
Returns:
A dictionary with:
'original_image': A [batch_size, H, W, C] uint8 image tensor.
'original_image_spatial_shape': A [batch_size, 2] tensor containing the
original image sizes.
'true_image_shape': A [batch_size, 3] tensor containing the size of
the unpadded original_image.
'key': A [batch_size] string tensor with image identifier.
'detection_boxes': [batch_size, max_detections, 4] float32 tensor of boxes,
in normalized or absolute coordinates, depending on the value of
`scale_to_absolute`.
'detection_scores': [batch_size, max_detections] float32 tensor of scores.
'detection_classes': [batch_size, max_detections] int64 tensor of 1-indexed
classes.
'detection_masks': [batch_size, max_detections, H, W] uint8 tensor of
instance masks, reframed to full image masks. Note that these may be
binarized (e.g. {0, 1}), or may contain 1-indexed part labels. (Optional)
'detection_keypoints': [batch_size, max_detections, num_keypoints, 2]
float32 tensor containing keypoint coordinates. (Optional)
'detection_keypoint_scores': [batch_size, max_detections, num_keypoints]
float32 tensor containing keypoint scores. (Optional)
'detection_surface_coords': [batch_size, max_detection, H, W, 2] float32
tensor with normalized surface coordinates (e.g. DensePose UV
coordinates). (Optional)
'num_detections': [batch_size] int64 tensor containing number of valid
detections.
'groundtruth_boxes': [batch_size, num_boxes, 4] float32 tensor of boxes, in
normalized or absolute coordinates, depending on the value of
`scale_to_absolute`. (Optional)
'groundtruth_classes': [batch_size, num_boxes] int64 tensor of 1-indexed
classes. (Optional)
'groundtruth_area': [batch_size, num_boxes] float32 tensor of bbox
area. (Optional)
'groundtruth_is_crowd': [batch_size, num_boxes] int64 tensor. (Optional)
'groundtruth_difficult': [batch_size, num_boxes] int64 tensor. (Optional)
'groundtruth_group_of': [batch_size, num_boxes] int64 tensor. (Optional)
'groundtruth_instance_masks': 4D int64 tensor of instance masks
(Optional).
'groundtruth_keypoints': [batch_size, num_boxes, num_keypoints, 2] float32
tensor with keypoints (Optional).
'groundtruth_keypoint_visibilities': [batch_size, num_boxes, num_keypoints]
bool tensor with keypoint visibilities (Optional).
'groundtruth_labeled_classes': [batch_size, num_classes] int64 tensor
of 1-indexed classes. (Optional)
'num_groundtruth_boxes': [batch_size] tensor containing the maximum number
of groundtruth boxes per image.
Raises:
    ValueError: if original_image_spatial_shapes is not a 2D int32 tensor of
      shape [batch_size, 2].
    ValueError: if true_image_shapes is not a 2D int32 tensor of shape
      [batch_size, 3].
"""
input_data_fields = fields.InputDataFields
if original_image_spatial_shapes is None:
original_image_spatial_shapes = tf.tile(
tf.expand_dims(tf.shape(images)[1:3], axis=0),
multiples=[tf.shape(images)[0], 1])
else:
    if (len(original_image_spatial_shapes.shape) != 2 or
        original_image_spatial_shapes.shape[1] != 2):
raise ValueError(
'`original_image_spatial_shape` should be a 2D tensor of shape '
'[batch_size, 2].')
if true_image_shapes is None:
true_image_shapes = tf.tile(
tf.expand_dims(tf.shape(images)[1:4], axis=0),
multiples=[tf.shape(images)[0], 1])
else:
    if (len(true_image_shapes.shape) != 2
        or true_image_shapes.shape[1] != 3):
raise ValueError('`true_image_shapes` should be a 2D tensor of '
'shape [batch_size, 3].')
output_dict = {
input_data_fields.original_image:
images,
input_data_fields.key:
keys,
input_data_fields.original_image_spatial_shape: (
original_image_spatial_shapes),
input_data_fields.true_image_shape:
true_image_shapes
}
detection_fields = fields.DetectionResultFields
detection_boxes = detections[detection_fields.detection_boxes]
detection_scores = detections[detection_fields.detection_scores]
num_detections = tf.cast(detections[detection_fields.num_detections],
dtype=tf.int32)
if class_agnostic:
detection_classes = tf.ones_like(detection_scores, dtype=tf.int64)
else:
detection_classes = (
tf.to_int64(detections[detection_fields.detection_classes]) +
label_id_offset)
if scale_to_absolute:
output_dict[detection_fields.detection_boxes] = (
shape_utils.static_or_dynamic_map_fn(
_scale_box_to_absolute,
elems=[detection_boxes, original_image_spatial_shapes],
dtype=tf.float32))
else:
output_dict[detection_fields.detection_boxes] = detection_boxes
output_dict[detection_fields.detection_classes] = detection_classes
output_dict[detection_fields.detection_scores] = detection_scores
output_dict[detection_fields.num_detections] = num_detections
if detection_fields.detection_masks in detections:
detection_masks = detections[detection_fields.detection_masks]
output_dict[detection_fields.detection_masks] = resize_detection_masks(
detection_boxes, detection_masks, original_image_spatial_shapes)
if detection_fields.detection_surface_coords in detections:
detection_surface_coords = detections[
detection_fields.detection_surface_coords]
output_dict[detection_fields.detection_surface_coords] = (
shape_utils.static_or_dynamic_map_fn(
_resize_surface_coordinate_masks,
elems=[detection_boxes, detection_surface_coords,
original_image_spatial_shapes],
dtype=tf.float32))
if detection_fields.detection_keypoints in detections:
detection_keypoints = detections[detection_fields.detection_keypoints]
output_dict[detection_fields.detection_keypoints] = detection_keypoints
if scale_to_absolute:
output_dict[detection_fields.detection_keypoints] = (
shape_utils.static_or_dynamic_map_fn(
_scale_keypoint_to_absolute,
elems=[detection_keypoints, original_image_spatial_shapes],
dtype=tf.float32))
if detection_fields.detection_keypoint_scores in detections:
output_dict[detection_fields.detection_keypoint_scores] = detections[
detection_fields.detection_keypoint_scores]
else:
output_dict[detection_fields.detection_keypoint_scores] = tf.ones_like(
detections[detection_fields.detection_keypoints][:, :, :, 0])
if groundtruth:
if max_gt_boxes is None:
if input_data_fields.num_groundtruth_boxes in groundtruth:
max_gt_boxes = groundtruth[input_data_fields.num_groundtruth_boxes]
else:
raise ValueError(
'max_gt_boxes must be provided when processing batched examples.')
if input_data_fields.groundtruth_instance_masks in groundtruth:
masks = groundtruth[input_data_fields.groundtruth_instance_masks]
max_spatial_shape = tf.reduce_max(
original_image_spatial_shapes, axis=0, keep_dims=True)
tiled_max_spatial_shape = tf.tile(
max_spatial_shape,
multiples=[tf.shape(original_image_spatial_shapes)[0], 1])
groundtruth[input_data_fields.groundtruth_instance_masks] = (
shape_utils.static_or_dynamic_map_fn(
_resize_groundtruth_masks,
elems=[masks, true_image_shapes,
original_image_spatial_shapes,
tiled_max_spatial_shape],
dtype=tf.uint8))
output_dict.update(groundtruth)
image_shape = tf.cast(tf.shape(images), tf.float32)
image_height, image_width = image_shape[1], image_shape[2]
def _scale_box_to_normalized_true_image(args):
"""Scale the box coordinates to be relative to the true image shape."""
boxes, true_image_shape = args
true_image_shape = tf.cast(true_image_shape, tf.float32)
true_height, true_width = true_image_shape[0], true_image_shape[1]
normalized_window = tf.stack([0.0, 0.0, true_height / image_height,
true_width / image_width])
return box_list_ops.change_coordinate_frame(
box_list.BoxList(boxes), normalized_window).get()
groundtruth_boxes = groundtruth[input_data_fields.groundtruth_boxes]
groundtruth_boxes = shape_utils.static_or_dynamic_map_fn(
_scale_box_to_normalized_true_image,
elems=[groundtruth_boxes, true_image_shapes], dtype=tf.float32)
output_dict[input_data_fields.groundtruth_boxes] = groundtruth_boxes
if input_data_fields.groundtruth_keypoints in groundtruth:
      # If groundtruth_keypoints is in the groundtruth dictionary, update the
      # coordinates to conform with the true image shape.
def _scale_keypoints_to_normalized_true_image(args):
"""Scale the box coordinates to be relative to the true image shape."""
keypoints, true_image_shape = args
true_image_shape = tf.cast(true_image_shape, tf.float32)
true_height, true_width = true_image_shape[0], true_image_shape[1]
normalized_window = tf.stack(
[0.0, 0.0, true_height / image_height, true_width / image_width])
return keypoint_ops.change_coordinate_frame(keypoints,
normalized_window)
groundtruth_keypoints = groundtruth[
input_data_fields.groundtruth_keypoints]
groundtruth_keypoints = shape_utils.static_or_dynamic_map_fn(
_scale_keypoints_to_normalized_true_image,
elems=[groundtruth_keypoints, true_image_shapes],
dtype=tf.float32)
output_dict[
input_data_fields.groundtruth_keypoints] = groundtruth_keypoints
if scale_to_absolute:
groundtruth_boxes = output_dict[input_data_fields.groundtruth_boxes]
output_dict[input_data_fields.groundtruth_boxes] = (
shape_utils.static_or_dynamic_map_fn(
_scale_box_to_absolute,
elems=[groundtruth_boxes, original_image_spatial_shapes],
dtype=tf.float32))
if input_data_fields.groundtruth_keypoints in groundtruth:
groundtruth_keypoints = output_dict[
input_data_fields.groundtruth_keypoints]
output_dict[input_data_fields.groundtruth_keypoints] = (
shape_utils.static_or_dynamic_map_fn(
_scale_keypoint_to_absolute,
elems=[groundtruth_keypoints, original_image_spatial_shapes],
dtype=tf.float32))
# For class-agnostic models, groundtruth classes all become 1.
if class_agnostic:
groundtruth_classes = groundtruth[input_data_fields.groundtruth_classes]
groundtruth_classes = tf.ones_like(groundtruth_classes, dtype=tf.int64)
output_dict[input_data_fields.groundtruth_classes] = groundtruth_classes
output_dict[input_data_fields.num_groundtruth_boxes] = max_gt_boxes
return output_dict
def get_evaluators(eval_config, categories, evaluator_options=None):
"""Returns the evaluator class according to eval_config, valid for categories.
Args:
eval_config: An `eval_pb2.EvalConfig`.
categories: A list of dicts, each of which has the following keys -
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name e.g., 'cat', 'dog'.
'keypoints': (optional) dict mapping this category's keypoints to unique
ids.
evaluator_options: A dictionary of metric names (see
EVAL_METRICS_CLASS_DICT) to `DetectionEvaluator` initialization
keyword arguments. For example:
      evaluator_options = {
'coco_detection_metrics': {'include_metrics_per_category': True}
}
Returns:
    A list of instances of DetectionEvaluator.
Raises:
ValueError: if metric is not in the metric class dictionary.
"""
evaluator_options = evaluator_options or {}
eval_metric_fn_keys = eval_config.metrics_set
if not eval_metric_fn_keys:
eval_metric_fn_keys = [EVAL_DEFAULT_METRIC]
evaluators_list = []
for eval_metric_fn_key in eval_metric_fn_keys:
if eval_metric_fn_key not in EVAL_METRICS_CLASS_DICT:
raise ValueError('Metric not found: {}'.format(eval_metric_fn_key))
kwargs_dict = (evaluator_options[eval_metric_fn_key] if eval_metric_fn_key
in evaluator_options else {})
evaluators_list.append(EVAL_METRICS_CLASS_DICT[eval_metric_fn_key](
categories,
**kwargs_dict))
if isinstance(eval_config, eval_pb2.EvalConfig):
parameterized_metrics = eval_config.parameterized_metric
for parameterized_metric in parameterized_metrics:
assert parameterized_metric.HasField('parameterized_metric')
if parameterized_metric.WhichOneof(
'parameterized_metric') == EVAL_KEYPOINT_METRIC:
keypoint_metrics = parameterized_metric.coco_keypoint_metrics
        # Build the list of keypoint dicts (id and name) for this category.
class_label = keypoint_metrics.class_label
category = None
for cat in categories:
if cat['name'] == class_label:
category = cat
break
if not category:
continue
keypoints_for_this_class = category['keypoints']
category_keypoints = [{
'id': keypoints_for_this_class[kp_name], 'name': kp_name
} for kp_name in keypoints_for_this_class]
# Create keypoint evaluator for this category.
evaluators_list.append(EVAL_METRICS_CLASS_DICT[EVAL_KEYPOINT_METRIC](
category['id'], category_keypoints, class_label,
keypoint_metrics.keypoint_label_to_sigmas))
return evaluators_list
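# The block below is an illustrative sketch, not part of the original module:
# it shows one way get_evaluators() is typically driven from an
# eval_pb2.EvalConfig. The metric name and the category dicts are assumptions
# chosen only for the example.
def _example_get_evaluators_usage():
  """Hypothetical usage sketch for get_evaluators(); never called here."""
  eval_config = eval_pb2.EvalConfig()
  eval_config.metrics_set.append('coco_detection_metrics')
  eval_config.include_metrics_per_category = True
  categories = [{'id': 1, 'name': 'cat'}, {'id': 2, 'name': 'dog'}]
  # Derive per-metric keyword arguments from the config, then build the
  # evaluator instances.
  options = evaluator_options_from_eval_config(eval_config)
  return get_evaluators(eval_config, categories, evaluator_options=options)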
def get_eval_metric_ops_for_evaluators(eval_config,
categories,
eval_dict):
"""Returns eval metrics ops to use with `tf.estimator.EstimatorSpec`.
Args:
eval_config: An `eval_pb2.EvalConfig`.
categories: A list of dicts, each of which has the following keys -
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name e.g., 'cat', 'dog'.
eval_dict: An evaluation dictionary, returned from
result_dict_for_single_example().
Returns:
A dictionary of metric names to tuple of value_op and update_op that can be
    used as eval metric ops in `tf.estimator.EstimatorSpec`.
"""
eval_metric_ops = {}
evaluator_options = evaluator_options_from_eval_config(eval_config)
evaluators_list = get_evaluators(eval_config, categories, evaluator_options)
for evaluator in evaluators_list:
eval_metric_ops.update(evaluator.get_estimator_eval_metric_ops(
eval_dict))
return eval_metric_ops
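# Illustrative sketch only (an assumption, not original code): the metric ops
# returned above are commonly attached to a tf.estimator.EstimatorSpec in EVAL
# mode. `eval_dict` is assumed to come from result_dict_for_single_example(),
# and `total_loss` is whatever scalar loss the model function computed.
def _example_estimator_spec(eval_config, categories, eval_dict, total_loss):
  """Hypothetical sketch wiring eval metric ops into an EstimatorSpec."""
  metric_ops = get_eval_metric_ops_for_evaluators(
      eval_config, categories, eval_dict)
  return tf.estimator.EstimatorSpec(
      mode=tf.estimator.ModeKeys.EVAL,
      loss=total_loss,
      eval_metric_ops=metric_ops)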
def evaluator_options_from_eval_config(eval_config):
"""Produces a dictionary of evaluation options for each eval metric.
Args:
eval_config: An `eval_pb2.EvalConfig`.
Returns:
evaluator_options: A dictionary of metric names (see
EVAL_METRICS_CLASS_DICT) to `DetectionEvaluator` initialization
keyword arguments. For example:
      evaluator_options = {
'coco_detection_metrics': {'include_metrics_per_category': True}
}
"""
eval_metric_fn_keys = eval_config.metrics_set
evaluator_options = {}
for eval_metric_fn_key in eval_metric_fn_keys:
if eval_metric_fn_key in (
'coco_detection_metrics', 'coco_mask_metrics', 'lvis_mask_metrics'):
evaluator_options[eval_metric_fn_key] = {
'include_metrics_per_category': (
eval_config.include_metrics_per_category)
}
if (hasattr(eval_config, 'all_metrics_per_category') and
eval_config.all_metrics_per_category):
evaluator_options[eval_metric_fn_key].update({
'all_metrics_per_category': eval_config.all_metrics_per_category
})
# For coco detection eval, if the eval_config proto contains the
# "skip_predictions_for_unlabeled_class" field, include this field in
# evaluator_options.
if eval_metric_fn_key == 'coco_detection_metrics' and hasattr(
eval_config, 'skip_predictions_for_unlabeled_class'):
evaluator_options[eval_metric_fn_key].update({
'skip_predictions_for_unlabeled_class':
(eval_config.skip_predictions_for_unlabeled_class)
})
for super_category in eval_config.super_categories:
if 'super_categories' not in evaluator_options[eval_metric_fn_key]:
evaluator_options[eval_metric_fn_key]['super_categories'] = {}
key = super_category
value = eval_config.super_categories[key].split(',')
evaluator_options[eval_metric_fn_key]['super_categories'][key] = value
if eval_metric_fn_key == 'lvis_mask_metrics' and hasattr(
eval_config, 'export_path'):
evaluator_options[eval_metric_fn_key].update({
'export_path': eval_config.export_path
})
elif eval_metric_fn_key == 'precision_at_recall_detection_metrics':
evaluator_options[eval_metric_fn_key] = {
'recall_lower_bound': (eval_config.recall_lower_bound),
'recall_upper_bound': (eval_config.recall_upper_bound)
}
return evaluator_options
def has_densepose(eval_dict):
return (fields.DetectionResultFields.detection_masks in eval_dict and
fields.DetectionResultFields.detection_surface_coords in eval_dict)
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/eval_util.py | eval_util.py |
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for export_tflite_graph_lib_tf2.py."""
from __future__ import division
import os
import unittest
import six
import tensorflow.compat.v2 as tf
from object_detection import export_tflite_graph_lib_tf2
from object_detection.builders import model_builder
from object_detection.core import model
from object_detection.protos import pipeline_pb2
from object_detection.utils import tf_version
from google.protobuf import text_format
if six.PY2:
import mock # pylint: disable=g-importing-member,g-import-not-at-top
else:
from unittest import mock # pylint: disable=g-importing-member,g-import-not-at-top
class FakeModel(model.DetectionModel):
def __init__(self):
super(FakeModel, self).__init__(num_classes=2)
self._conv = tf.keras.layers.Conv2D(
filters=1,
kernel_size=1,
strides=(1, 1),
padding='valid',
kernel_initializer=tf.keras.initializers.Constant(value=1.0))
def preprocess(self, inputs):
true_image_shapes = [] # Doesn't matter for the fake model.
return tf.identity(inputs), true_image_shapes
def predict(self, preprocessed_inputs, true_image_shapes):
prediction_tensors = {'image': self._conv(preprocessed_inputs)}
with tf.control_dependencies([prediction_tensors['image']]):
prediction_tensors['box_encodings'] = tf.constant(
[[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]]], tf.float32)
prediction_tensors['class_predictions_with_background'] = tf.constant(
[[[0.7, 0.6], [0.9, 0.0]]], tf.float32)
with tf.control_dependencies([
tf.convert_to_tensor(
prediction_tensors['image'].get_shape().as_list()[1:3])
]):
prediction_tensors['anchors'] = tf.constant(
[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 1.0]], tf.float32)
return prediction_tensors
def postprocess(self, prediction_dict, true_image_shapes):
predict_tensor_sum = tf.reduce_sum(prediction_dict['image'])
with tf.control_dependencies(list(prediction_dict.values())):
postprocessed_tensors = {
'detection_boxes':
tf.constant([[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]],
[[0.5, 0.5, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]]],
tf.float32),
'detection_scores':
predict_tensor_sum +
tf.constant([[0.7, 0.6], [0.9, 0.0]], tf.float32),
'detection_classes':
tf.constant([[0, 1], [1, 0]], tf.float32),
'num_detections':
tf.constant([2, 1], tf.float32),
'detection_keypoints':
tf.zeros([2, 17, 2], tf.float32),
'detection_keypoint_scores':
tf.zeros([2, 17], tf.float32),
}
return postprocessed_tensors
def restore_map(self, checkpoint_path, from_detection_checkpoint):
pass
def restore_from_objects(self, fine_tune_checkpoint_type):
pass
def loss(self, prediction_dict, true_image_shapes):
pass
def regularization_losses(self):
pass
def updates(self):
pass
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class ExportTfLiteGraphTest(tf.test.TestCase):
def _save_checkpoint_from_mock_model(self, checkpoint_dir):
mock_model = FakeModel()
fake_image = tf.zeros(shape=[1, 10, 10, 3], dtype=tf.float32)
preprocessed_inputs, true_image_shapes = mock_model.preprocess(fake_image)
predictions = mock_model.predict(preprocessed_inputs, true_image_shapes)
mock_model.postprocess(predictions, true_image_shapes)
ckpt = tf.train.Checkpoint(model=mock_model)
exported_checkpoint_manager = tf.train.CheckpointManager(
ckpt, checkpoint_dir, max_to_keep=1)
exported_checkpoint_manager.save(checkpoint_number=0)
def _get_ssd_config(self):
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10
pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10
pipeline_config.model.ssd.num_classes = 2
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.y_scale = 10.0
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.x_scale = 10.0
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.height_scale = 5.0
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.width_scale = 5.0
pipeline_config.model.ssd.post_processing.batch_non_max_suppression.iou_threshold = 0.5
return pipeline_config
def _get_center_net_config(self):
pipeline_config_text = """
model {
center_net {
num_classes: 1
feature_extractor {
type: "mobilenet_v2_fpn"
}
image_resizer {
fixed_shape_resizer {
height: 10
width: 10
}
}
object_detection_task {
localization_loss {
l1_localization_loss {
}
}
}
object_center_params {
classification_loss {
}
max_box_predictions: 20
}
keypoint_estimation_task {
loss {
localization_loss {
l1_localization_loss {
}
}
classification_loss {
penalty_reduced_logistic_focal_loss {
}
}
}
}
}
}
"""
return text_format.Parse(
pipeline_config_text, pipeline_pb2.TrainEvalPipelineConfig())
# The tf.implements signature is important since it ensures MLIR legalization,
# so we test it here.
def test_postprocess_implements_signature(self):
tmp_dir = self.get_temp_dir()
self._save_checkpoint_from_mock_model(tmp_dir)
pipeline_config = self._get_ssd_config()
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel()
detection_model = model_builder.build(
pipeline_config.model, is_training=False)
ckpt = tf.train.Checkpoint(model=detection_model)
manager = tf.train.CheckpointManager(ckpt, tmp_dir, max_to_keep=1)
ckpt.restore(manager.latest_checkpoint).expect_partial()
# The module helps build a TF graph appropriate for TFLite conversion.
detection_module = export_tflite_graph_lib_tf2.SSDModule(
pipeline_config=pipeline_config,
detection_model=detection_model,
max_detections=20,
use_regular_nms=True)
expected_signature = ('name: "TFLite_Detection_PostProcess" attr { key: '
'"max_detections" value { i: 20 } } attr { key: '
'"max_classes_per_detection" value { i: 1 } } attr '
'{ key: "use_regular_nms" value { b: true } } attr '
'{ key: "nms_score_threshold" value { f: 0.000000 }'
' } attr { key: "nms_iou_threshold" value { f: '
'0.500000 } } attr { key: "y_scale" value { f: '
'10.000000 } } attr { key: "x_scale" value { f: '
'10.000000 } } attr { key: "h_scale" value { f: '
'5.000000 } } attr { key: "w_scale" value { f: '
'5.000000 } } attr { key: "num_classes" value { i: '
'2 } }')
self.assertEqual(expected_signature,
detection_module.postprocess_implements_signature())
def test_unsupported_architecture(self):
tmp_dir = self.get_temp_dir()
self._save_checkpoint_from_mock_model(tmp_dir)
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.model.faster_rcnn.num_classes = 10
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel()
output_directory = os.path.join(tmp_dir, 'output')
expected_message = 'Only ssd or center_net models are supported in tflite'
try:
export_tflite_graph_lib_tf2.export_tflite_model(
pipeline_config=pipeline_config,
trained_checkpoint_dir=tmp_dir,
output_directory=output_directory,
max_detections=10,
use_regular_nms=False)
except ValueError as e:
if expected_message not in str(e):
raise
else:
raise AssertionError('Exception not raised: %s' % expected_message)
def test_export_yields_saved_model(self):
tmp_dir = self.get_temp_dir()
self._save_checkpoint_from_mock_model(tmp_dir)
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel()
output_directory = os.path.join(tmp_dir, 'output')
export_tflite_graph_lib_tf2.export_tflite_model(
pipeline_config=self._get_ssd_config(),
trained_checkpoint_dir=tmp_dir,
output_directory=output_directory,
max_detections=10,
use_regular_nms=False)
self.assertTrue(
os.path.exists(
os.path.join(output_directory, 'saved_model', 'saved_model.pb')))
self.assertTrue(
os.path.exists(
os.path.join(output_directory, 'saved_model', 'variables',
'variables.index')))
self.assertTrue(
os.path.exists(
os.path.join(output_directory, 'saved_model', 'variables',
'variables.data-00000-of-00001')))
def test_exported_model_inference(self):
tmp_dir = self.get_temp_dir()
output_directory = os.path.join(tmp_dir, 'output')
self._save_checkpoint_from_mock_model(tmp_dir)
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel()
export_tflite_graph_lib_tf2.export_tflite_model(
pipeline_config=self._get_ssd_config(),
trained_checkpoint_dir=tmp_dir,
output_directory=output_directory,
max_detections=10,
use_regular_nms=False)
saved_model_path = os.path.join(output_directory, 'saved_model')
detect_fn = tf.saved_model.load(saved_model_path)
detect_fn_sig = detect_fn.signatures['serving_default']
image = tf.zeros(shape=[1, 10, 10, 3], dtype=tf.float32)
detections = detect_fn_sig(image)
    # The exported graph doesn't have numerically correct outputs, but there
    # should be 4 output tensors.
self.assertEqual(4, len(detections))
def test_center_net_inference_object_detection(self):
tmp_dir = self.get_temp_dir()
output_directory = os.path.join(tmp_dir, 'output')
self._save_checkpoint_from_mock_model(tmp_dir)
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel()
export_tflite_graph_lib_tf2.export_tflite_model(
pipeline_config=self._get_center_net_config(),
trained_checkpoint_dir=tmp_dir,
output_directory=output_directory,
max_detections=10,
use_regular_nms=False)
saved_model_path = os.path.join(output_directory, 'saved_model')
detect_fn = tf.saved_model.load(saved_model_path)
detect_fn_sig = detect_fn.signatures['serving_default']
image = tf.zeros(shape=[1, 10, 10, 3], dtype=tf.float32)
detections = detect_fn_sig(image)
    # The exported graph doesn't have numerically correct outputs, but there
    # should be 4 output tensors.
self.assertEqual(4, len(detections))
def test_center_net_inference_keypoint(self):
tmp_dir = self.get_temp_dir()
output_directory = os.path.join(tmp_dir, 'output')
self._save_checkpoint_from_mock_model(tmp_dir)
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel()
export_tflite_graph_lib_tf2.export_tflite_model(
pipeline_config=self._get_center_net_config(),
trained_checkpoint_dir=tmp_dir,
output_directory=output_directory,
max_detections=10,
use_regular_nms=False,
include_keypoints=True)
saved_model_path = os.path.join(output_directory, 'saved_model')
detect_fn = tf.saved_model.load(saved_model_path)
detect_fn_sig = detect_fn.signatures['serving_default']
image = tf.zeros(shape=[1, 10, 10, 3], dtype=tf.float32)
detections = detect_fn_sig(image)
# The exported graph doesn't have numerically correct outputs, but there
# should be 6 (4 for boxes, 2 for keypoints).
self.assertEqual(6, len(detections))
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/export_tflite_graph_lib_tf2_test.py | export_tflite_graph_lib_tf2_test.py |
# Lint as: python2, python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for exporter_lib_v2.py."""
from __future__ import division
import io
import os
import unittest
from absl.testing import parameterized
import numpy as np
from PIL import Image
import six
import tensorflow.compat.v2 as tf
from object_detection import exporter_lib_v2
from object_detection.builders import model_builder
from object_detection.core import model
from object_detection.core import standard_fields as fields
from object_detection.protos import pipeline_pb2
from object_detection.utils import dataset_util
from object_detection.utils import tf_version
if six.PY2:
import mock # pylint: disable=g-importing-member,g-import-not-at-top
else:
from unittest import mock # pylint: disable=g-importing-member,g-import-not-at-top
class FakeModel(model.DetectionModel):
def __init__(self, conv_weight_scalar=1.0):
super(FakeModel, self).__init__(num_classes=2)
self._conv = tf.keras.layers.Conv2D(
filters=1, kernel_size=1, strides=(1, 1), padding='valid',
kernel_initializer=tf.keras.initializers.Constant(
value=conv_weight_scalar))
def preprocess(self, inputs):
return tf.identity(inputs), exporter_lib_v2.get_true_shapes(inputs)
def predict(self, preprocessed_inputs, true_image_shapes, **side_inputs):
return_dict = {'image': self._conv(preprocessed_inputs)}
if 'side_inp_1' in side_inputs:
return_dict['image'] += side_inputs['side_inp_1']
return return_dict
def postprocess(self, prediction_dict, true_image_shapes):
predict_tensor_sum = tf.reduce_sum(prediction_dict['image'])
with tf.control_dependencies(list(prediction_dict.values())):
postprocessed_tensors = {
'detection_boxes': tf.constant([[[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.8, 0.8]],
[[0.5, 0.5, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0]]], tf.float32),
'detection_scores': predict_tensor_sum + tf.constant(
[[0.7, 0.6], [0.9, 0.0]], tf.float32),
'detection_classes': tf.constant([[0, 1],
[1, 0]], tf.float32),
'num_detections': tf.constant([2, 1], tf.float32),
}
return postprocessed_tensors
def predict_masks_from_boxes(self, prediction_dict, true_image_shapes, boxes):
output_dict = self.postprocess(prediction_dict, true_image_shapes)
output_dict.update({
'detection_masks': tf.ones(shape=(1, 2, 16), dtype=tf.float32),
})
return output_dict
def restore_map(self, checkpoint_path, fine_tune_checkpoint_type):
pass
def restore_from_objects(self, fine_tune_checkpoint_type):
pass
def loss(self, prediction_dict, true_image_shapes):
pass
def regularization_losses(self):
pass
def updates(self):
pass
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class ExportInferenceGraphTest(tf.test.TestCase, parameterized.TestCase):
def _save_checkpoint_from_mock_model(
self, checkpoint_dir, conv_weight_scalar=6.0):
mock_model = FakeModel(conv_weight_scalar)
fake_image = tf.zeros(shape=[1, 10, 10, 3], dtype=tf.float32)
preprocessed_inputs, true_image_shapes = mock_model.preprocess(fake_image)
predictions = mock_model.predict(preprocessed_inputs, true_image_shapes)
mock_model.postprocess(predictions, true_image_shapes)
ckpt = tf.train.Checkpoint(model=mock_model)
exported_checkpoint_manager = tf.train.CheckpointManager(
ckpt, checkpoint_dir, max_to_keep=1)
exported_checkpoint_manager.save(checkpoint_number=0)
@parameterized.parameters(
{'input_type': 'image_tensor'},
{'input_type': 'encoded_image_string_tensor'},
{'input_type': 'tf_example'},
)
def test_export_yields_correct_directory_structure(
self, input_type='image_tensor'):
tmp_dir = self.get_temp_dir()
self._save_checkpoint_from_mock_model(tmp_dir)
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel()
exporter_lib_v2.INPUT_BUILDER_UTIL_MAP['model_build'] = mock_builder
output_directory = os.path.join(tmp_dir, 'output')
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
exporter_lib_v2.export_inference_graph(
input_type=input_type,
pipeline_config=pipeline_config,
trained_checkpoint_dir=tmp_dir,
output_directory=output_directory)
self.assertTrue(os.path.exists(os.path.join(
output_directory, 'saved_model', 'saved_model.pb')))
self.assertTrue(os.path.exists(os.path.join(
output_directory, 'saved_model', 'variables', 'variables.index')))
self.assertTrue(os.path.exists(os.path.join(
output_directory, 'saved_model', 'variables',
'variables.data-00000-of-00001')))
self.assertTrue(os.path.exists(os.path.join(
output_directory, 'checkpoint', 'ckpt-0.index')))
self.assertTrue(os.path.exists(os.path.join(
output_directory, 'checkpoint', 'ckpt-0.data-00000-of-00001')))
self.assertTrue(os.path.exists(os.path.join(
output_directory, 'pipeline.config')))
def get_dummy_input(self, input_type):
"""Get dummy input for the given input type."""
if input_type == 'image_tensor':
return np.zeros((1, 20, 20, 3), dtype=np.uint8)
if input_type == 'float_image_tensor':
return np.zeros((1, 20, 20, 3), dtype=np.float32)
elif input_type == 'encoded_image_string_tensor':
image = Image.new('RGB', (20, 20))
byte_io = io.BytesIO()
image.save(byte_io, 'PNG')
return [byte_io.getvalue()]
elif input_type == 'tf_example':
image_tensor = tf.zeros((20, 20, 3), dtype=tf.uint8)
encoded_jpeg = tf.image.encode_jpeg(tf.constant(image_tensor)).numpy()
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/source_id':
dataset_util.bytes_feature(six.b('image_id')),
})).SerializeToString()
return [example]
@parameterized.parameters(
{'input_type': 'image_tensor'},
{'input_type': 'encoded_image_string_tensor'},
{'input_type': 'tf_example'},
{'input_type': 'float_image_tensor'},
)
def test_export_saved_model_and_run_inference(
self, input_type='image_tensor'):
tmp_dir = self.get_temp_dir()
self._save_checkpoint_from_mock_model(tmp_dir)
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel()
exporter_lib_v2.INPUT_BUILDER_UTIL_MAP['model_build'] = mock_builder
output_directory = os.path.join(tmp_dir, 'output')
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
exporter_lib_v2.export_inference_graph(
input_type=input_type,
pipeline_config=pipeline_config,
trained_checkpoint_dir=tmp_dir,
output_directory=output_directory)
saved_model_path = os.path.join(output_directory, 'saved_model')
detect_fn = tf.saved_model.load(saved_model_path)
image = self.get_dummy_input(input_type)
detections = detect_fn(tf.constant(image))
detection_fields = fields.DetectionResultFields
self.assertAllClose(detections[detection_fields.detection_boxes],
[[[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.8, 0.8]],
[[0.5, 0.5, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0]]])
self.assertAllClose(detections[detection_fields.detection_scores],
[[0.7, 0.6], [0.9, 0.0]])
self.assertAllClose(detections[detection_fields.detection_classes],
[[1, 2], [2, 1]])
self.assertAllClose(detections[detection_fields.num_detections], [2, 1])
@parameterized.parameters(
{'use_default_serving': True},
{'use_default_serving': False}
)
def test_export_saved_model_and_run_inference_with_side_inputs(
self, input_type='image_tensor', use_default_serving=True):
tmp_dir = self.get_temp_dir()
self._save_checkpoint_from_mock_model(tmp_dir)
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel()
exporter_lib_v2.INPUT_BUILDER_UTIL_MAP['model_build'] = mock_builder
output_directory = os.path.join(tmp_dir, 'output')
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
exporter_lib_v2.export_inference_graph(
input_type=input_type,
pipeline_config=pipeline_config,
trained_checkpoint_dir=tmp_dir,
output_directory=output_directory,
use_side_inputs=True,
side_input_shapes='1/2,2',
side_input_names='side_inp_1,side_inp_2',
side_input_types='tf.float32,tf.uint8')
saved_model_path = os.path.join(output_directory, 'saved_model')
detect_fn = tf.saved_model.load(saved_model_path)
detect_fn_sig = detect_fn.signatures['serving_default']
image = tf.constant(self.get_dummy_input(input_type))
side_input_1 = np.ones((1,), dtype=np.float32)
side_input_2 = np.ones((2, 2), dtype=np.uint8)
if use_default_serving:
detections = detect_fn_sig(input_tensor=image,
side_inp_1=tf.constant(side_input_1),
side_inp_2=tf.constant(side_input_2))
else:
detections = detect_fn(image,
tf.constant(side_input_1),
tf.constant(side_input_2))
detection_fields = fields.DetectionResultFields
self.assertAllClose(detections[detection_fields.detection_boxes],
[[[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.8, 0.8]],
[[0.5, 0.5, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0]]])
self.assertAllClose(detections[detection_fields.detection_scores],
[[400.7, 400.6], [400.9, 400.0]])
self.assertAllClose(detections[detection_fields.detection_classes],
[[1, 2], [2, 1]])
self.assertAllClose(detections[detection_fields.num_detections], [2, 1])
def test_export_checkpoint_and_run_inference_with_image(self):
tmp_dir = self.get_temp_dir()
self._save_checkpoint_from_mock_model(tmp_dir, conv_weight_scalar=2.0)
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel()
exporter_lib_v2.INPUT_BUILDER_UTIL_MAP['model_build'] = mock_builder
output_directory = os.path.join(tmp_dir, 'output')
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
exporter_lib_v2.export_inference_graph(
input_type='image_tensor',
pipeline_config=pipeline_config,
trained_checkpoint_dir=tmp_dir,
output_directory=output_directory)
mock_model = FakeModel()
ckpt = tf.compat.v2.train.Checkpoint(
model=mock_model)
checkpoint_dir = os.path.join(tmp_dir, 'output', 'checkpoint')
manager = tf.compat.v2.train.CheckpointManager(
ckpt, checkpoint_dir, max_to_keep=7)
ckpt.restore(manager.latest_checkpoint).expect_partial()
fake_image = tf.ones(shape=[1, 5, 5, 3], dtype=tf.float32)
preprocessed_inputs, true_image_shapes = mock_model.preprocess(fake_image)
predictions = mock_model.predict(preprocessed_inputs, true_image_shapes)
detections = mock_model.postprocess(predictions, true_image_shapes)
# 150 = conv_weight_scalar * height * width * channels = 2 * 5 * 5 * 3.
self.assertAllClose(detections['detection_scores'],
[[150 + 0.7, 150 + 0.6], [150 + 0.9, 150 + 0.0]])
class DetectionFromImageAndBoxModuleTest(tf.test.TestCase):
def get_dummy_input(self, input_type):
"""Get dummy input for the given input type."""
if input_type == 'image_tensor' or input_type == 'image_and_boxes_tensor':
return np.zeros((1, 20, 20, 3), dtype=np.uint8)
if input_type == 'float_image_tensor':
return np.zeros((1, 20, 20, 3), dtype=np.float32)
elif input_type == 'encoded_image_string_tensor':
image = Image.new('RGB', (20, 20))
byte_io = io.BytesIO()
image.save(byte_io, 'PNG')
return [byte_io.getvalue()]
elif input_type == 'tf_example':
image_tensor = tf.zeros((20, 20, 3), dtype=tf.uint8)
encoded_jpeg = tf.image.encode_jpeg(tf.constant(image_tensor)).numpy()
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/source_id':
dataset_util.bytes_feature(six.b('image_id')),
})).SerializeToString()
return [example]
def _save_checkpoint_from_mock_model(self,
checkpoint_dir,
conv_weight_scalar=6.0):
mock_model = FakeModel(conv_weight_scalar)
fake_image = tf.zeros(shape=[1, 10, 10, 3], dtype=tf.float32)
preprocessed_inputs, true_image_shapes = mock_model.preprocess(fake_image)
predictions = mock_model.predict(preprocessed_inputs, true_image_shapes)
mock_model.postprocess(predictions, true_image_shapes)
ckpt = tf.train.Checkpoint(model=mock_model)
exported_checkpoint_manager = tf.train.CheckpointManager(
ckpt, checkpoint_dir, max_to_keep=1)
exported_checkpoint_manager.save(checkpoint_number=0)
def test_export_saved_model_and_run_inference_for_segmentation(
self, input_type='image_and_boxes_tensor'):
tmp_dir = self.get_temp_dir()
self._save_checkpoint_from_mock_model(tmp_dir)
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel()
exporter_lib_v2.INPUT_BUILDER_UTIL_MAP['model_build'] = mock_builder
output_directory = os.path.join(tmp_dir, 'output')
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
exporter_lib_v2.export_inference_graph(
input_type=input_type,
pipeline_config=pipeline_config,
trained_checkpoint_dir=tmp_dir,
output_directory=output_directory)
saved_model_path = os.path.join(output_directory, 'saved_model')
detect_fn = tf.saved_model.load(saved_model_path)
image = self.get_dummy_input(input_type)
boxes = tf.constant([
[
[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.8, 0.8],
],
])
detections = detect_fn(tf.constant(image), boxes)
detection_fields = fields.DetectionResultFields
self.assertIn(detection_fields.detection_masks, detections)
self.assertListEqual(
list(detections[detection_fields.detection_masks].shape), [1, 2, 16])
if __name__ == '__main__':
tf.enable_v2_behavior()
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/exporter_lib_tf2_test.py | exporter_lib_tf2_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to export object detection inference graph."""
import os
import tempfile
import tensorflow.compat.v1 as tf
import tf_slim as slim
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.tools import freeze_graph # pylint: disable=g-direct-tensorflow-import
from object_detection.builders import graph_rewriter_builder
from object_detection.builders import model_builder
from object_detection.core import standard_fields as fields
from object_detection.data_decoders import tf_example_decoder
from object_detection.utils import config_util
from object_detection.utils import shape_utils
# pylint: disable=g-import-not-at-top
try:
from tensorflow.contrib import tfprof as contrib_tfprof
from tensorflow.contrib.quantize.python import graph_matcher
except ImportError:
# TF 2.0 doesn't ship with contrib.
pass
# pylint: enable=g-import-not-at-top
freeze_graph_with_def_protos = freeze_graph.freeze_graph_with_def_protos
def parse_side_inputs(side_input_shapes_string, side_input_names_string,
side_input_types_string):
"""Parses side input flags.
Args:
side_input_shapes_string: The shape of the side input tensors, provided as a
comma-separated list of integers. A value of -1 is used for unknown
dimensions. A `/` denotes a break, starting the shape of the next side
input tensor.
side_input_names_string: The names of the side input tensors, provided as a
comma-separated list of strings.
side_input_types_string: The type of the side input tensors, provided as a
      comma-separated list of types, each of `string`, `int`, or `float`.
Returns:
side_input_shapes: A list of shapes.
side_input_names: A list of strings.
side_input_types: A list of tensorflow dtypes.
"""
if side_input_shapes_string:
side_input_shapes = []
for side_input_shape_list in side_input_shapes_string.split('/'):
side_input_shape = [
int(dim) if dim != '-1' else None
for dim in side_input_shape_list.split(',')
]
side_input_shapes.append(side_input_shape)
else:
raise ValueError('When using side_inputs, side_input_shapes must be '
'specified in the input flags.')
if side_input_names_string:
side_input_names = list(side_input_names_string.split(','))
else:
raise ValueError('When using side_inputs, side_input_names must be '
'specified in the input flags.')
if side_input_types_string:
typelookup = {'float': tf.float32, 'int': tf.int32, 'string': tf.string}
side_input_types = [
typelookup[side_input_type]
for side_input_type in side_input_types_string.split(',')
]
else:
raise ValueError('When using side_inputs, side_input_types must be '
'specified in the input flags.')
return side_input_shapes, side_input_names, side_input_types
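# Illustrative sketch, not part of the original module: the flag strings below
# are made-up values showing the format parse_side_inputs() expects -- two side
# inputs, the first of shape [1, None] and the second of shape [2, 2].
def _example_parse_side_inputs():
  """Hypothetical sketch of the side-input flag format."""
  shapes, names, types = parse_side_inputs(
      side_input_shapes_string='1,-1/2,2',
      side_input_names_string='side_inp_1,side_inp_2',
      side_input_types_string='float,int')
  # shapes == [[1, None], [2, 2]]
  # names == ['side_inp_1', 'side_inp_2']
  # types == [tf.float32, tf.int32]
  return shapes, names, types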
def rewrite_nn_resize_op(is_quantized=False):
"""Replaces a custom nearest-neighbor resize op with the Tensorflow version.
Some graphs use this custom version for TPU-compatibility.
Args:
is_quantized: True if the default graph is quantized.
"""
def remove_nn():
"""Remove nearest neighbor upsampling structures and replace with TF op."""
input_pattern = graph_matcher.OpTypePattern(
'FakeQuantWithMinMaxVars' if is_quantized else '*')
stack_1_pattern = graph_matcher.OpTypePattern(
'Pack', inputs=[input_pattern, input_pattern], ordered_inputs=False)
stack_2_pattern = graph_matcher.OpTypePattern(
'Pack', inputs=[stack_1_pattern, stack_1_pattern], ordered_inputs=False)
reshape_pattern = graph_matcher.OpTypePattern(
'Reshape', inputs=[stack_2_pattern, 'Const'], ordered_inputs=False)
consumer_pattern1 = graph_matcher.OpTypePattern(
'Add|AddV2|Max|Mul', inputs=[reshape_pattern, '*'],
ordered_inputs=False)
consumer_pattern2 = graph_matcher.OpTypePattern(
'StridedSlice', inputs=[reshape_pattern, '*', '*', '*'],
ordered_inputs=False)
def replace_matches(consumer_pattern):
"""Search for nearest neighbor pattern and replace with TF op."""
match_counter = 0
matcher = graph_matcher.GraphMatcher(consumer_pattern)
for match in matcher.match_graph(tf.get_default_graph()):
match_counter += 1
projection_op = match.get_op(input_pattern)
reshape_op = match.get_op(reshape_pattern)
consumer_op = match.get_op(consumer_pattern)
nn_resize = tf.image.resize_nearest_neighbor(
projection_op.outputs[0],
reshape_op.outputs[0].shape.dims[1:3],
align_corners=False,
name=os.path.split(reshape_op.name)[0] + '/resize_nearest_neighbor')
for index, op_input in enumerate(consumer_op.inputs):
if op_input == reshape_op.outputs[0]:
consumer_op._update_input(index, nn_resize) # pylint: disable=protected-access
break
return match_counter
match_counter = replace_matches(consumer_pattern1)
match_counter += replace_matches(consumer_pattern2)
tf.logging.info('Found and fixed {} matches'.format(match_counter))
return match_counter
  # Apply repeatedly because both inputs to an Add could match the NN pattern.
total_removals = 0
while remove_nn():
total_removals += 1
# This number is chosen based on the nas-fpn architecture.
if total_removals > 4:
      raise ValueError('Graph removal encountered an infinite loop.')
def replace_variable_values_with_moving_averages(graph,
current_checkpoint_file,
new_checkpoint_file,
no_ema_collection=None):
"""Replaces variable values in the checkpoint with their moving averages.
If the current checkpoint has shadow variables maintaining moving averages of
the variables defined in the graph, this function generates a new checkpoint
where the variables contain the values of their moving averages.
Args:
graph: a tf.Graph object.
current_checkpoint_file: a checkpoint containing both original variables and
their moving averages.
new_checkpoint_file: file path to write a new checkpoint.
no_ema_collection: A list of namescope substrings to match the variables
to eliminate EMA.
"""
with graph.as_default():
variable_averages = tf.train.ExponentialMovingAverage(0.0)
ema_variables_to_restore = variable_averages.variables_to_restore()
ema_variables_to_restore = config_util.remove_unnecessary_ema(
ema_variables_to_restore, no_ema_collection)
with tf.Session() as sess:
read_saver = tf.train.Saver(ema_variables_to_restore)
read_saver.restore(sess, current_checkpoint_file)
write_saver = tf.train.Saver()
write_saver.save(sess, new_checkpoint_file)
def _image_tensor_input_placeholder(input_shape=None):
"""Returns input placeholder and a 4-D uint8 image tensor."""
if input_shape is None:
input_shape = (None, None, None, 3)
input_tensor = tf.placeholder(
dtype=tf.uint8, shape=input_shape, name='image_tensor')
return input_tensor, input_tensor
def _side_input_tensor_placeholder(side_input_shape, side_input_name,
side_input_type):
"""Returns side input placeholder and side input tensor."""
side_input_tensor = tf.placeholder(
dtype=side_input_type, shape=side_input_shape, name=side_input_name)
return side_input_tensor, side_input_tensor
def _tf_example_input_placeholder(input_shape=None):
"""Returns input that accepts a batch of strings with tf examples.
Args:
input_shape: the shape to resize the output decoded images to (optional).
Returns:
a tuple of input placeholder and the output decoded images.
"""
batch_tf_example_placeholder = tf.placeholder(
tf.string, shape=[None], name='tf_example')
def decode(tf_example_string_tensor):
tensor_dict = tf_example_decoder.TfExampleDecoder().decode(
tf_example_string_tensor)
image_tensor = tensor_dict[fields.InputDataFields.image]
if input_shape is not None:
image_tensor = tf.image.resize(image_tensor, input_shape[1:3])
return image_tensor
return (batch_tf_example_placeholder,
shape_utils.static_or_dynamic_map_fn(
decode,
elems=batch_tf_example_placeholder,
dtype=tf.uint8,
parallel_iterations=32,
back_prop=False))
def _encoded_image_string_tensor_input_placeholder(input_shape=None):
"""Returns input that accepts a batch of PNG or JPEG strings.
Args:
input_shape: the shape to resize the output decoded images to (optional).
Returns:
a tuple of input placeholder and the output decoded images.
"""
batch_image_str_placeholder = tf.placeholder(
dtype=tf.string,
shape=[None],
name='encoded_image_string_tensor')
def decode(encoded_image_string_tensor):
image_tensor = tf.image.decode_image(encoded_image_string_tensor,
channels=3)
image_tensor.set_shape((None, None, 3))
if input_shape is not None:
image_tensor = tf.image.resize(image_tensor, input_shape[1:3])
return image_tensor
return (batch_image_str_placeholder,
tf.map_fn(
decode,
elems=batch_image_str_placeholder,
dtype=tf.uint8,
parallel_iterations=32,
back_prop=False))
input_placeholder_fn_map = {
'image_tensor': _image_tensor_input_placeholder,
'encoded_image_string_tensor':
_encoded_image_string_tensor_input_placeholder,
'tf_example': _tf_example_input_placeholder
}
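# Illustrative sketch, not part of the original module: the map above is the
# dispatch table used when building the detection graph; the input type and
# shape below are example values.
def _example_build_input_placeholder():
  """Hypothetical sketch selecting an input placeholder builder."""
  placeholder_fn = input_placeholder_fn_map['image_tensor']
  # For 'image_tensor', the placeholder and the model input are the same
  # 4-D uint8 tensor.
  placeholder_tensor, input_tensors = placeholder_fn(
      input_shape=[1, 300, 300, 3])
  return placeholder_tensor, input_tensors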
def add_output_tensor_nodes(postprocessed_tensors,
output_collection_name='inference_op'):
"""Adds output nodes for detection boxes and scores.
Adds the following nodes for output tensors -
* num_detections: float32 tensor of shape [batch_size].
* detection_boxes: float32 tensor of shape [batch_size, num_boxes, 4]
containing detected boxes.
* detection_scores: float32 tensor of shape [batch_size, num_boxes]
containing scores for the detected boxes.
  * detection_multiclass_scores: (Optional) float32 tensor of shape
    [batch_size, num_boxes, num_classes_with_background] containing the class
    score distribution for detected boxes, including background if any.
  * detection_features: (Optional) float32 tensor of shape
    [batch, num_boxes, roi_height, roi_width, depth] containing classifier
    features for each detected box.
* detection_classes: float32 tensor of shape [batch_size, num_boxes]
containing class predictions for the detected boxes.
* detection_keypoints: (Optional) float32 tensor of shape
[batch_size, num_boxes, num_keypoints, 2] containing keypoints for each
detection box.
* detection_masks: (Optional) float32 tensor of shape
[batch_size, num_boxes, mask_height, mask_width] containing masks for each
detection box.
Args:
postprocessed_tensors: a dictionary containing the following fields
'detection_boxes': [batch, max_detections, 4]
'detection_scores': [batch, max_detections]
'detection_multiclass_scores': [batch, max_detections,
num_classes_with_background]
'detection_features': [batch, num_boxes, roi_height, roi_width, depth]
'detection_classes': [batch, max_detections]
'detection_masks': [batch, max_detections, mask_height, mask_width]
(optional).
'detection_keypoints': [batch, max_detections, num_keypoints, 2]
(optional).
'num_detections': [batch]
output_collection_name: Name of collection to add output tensors to.
Returns:
A tensor dict containing the added output tensor nodes.
"""
detection_fields = fields.DetectionResultFields
label_id_offset = 1
boxes = postprocessed_tensors.get(detection_fields.detection_boxes)
scores = postprocessed_tensors.get(detection_fields.detection_scores)
multiclass_scores = postprocessed_tensors.get(
detection_fields.detection_multiclass_scores)
box_classifier_features = postprocessed_tensors.get(
detection_fields.detection_features)
raw_boxes = postprocessed_tensors.get(detection_fields.raw_detection_boxes)
raw_scores = postprocessed_tensors.get(detection_fields.raw_detection_scores)
classes = postprocessed_tensors.get(
detection_fields.detection_classes) + label_id_offset
keypoints = postprocessed_tensors.get(detection_fields.detection_keypoints)
masks = postprocessed_tensors.get(detection_fields.detection_masks)
num_detections = postprocessed_tensors.get(detection_fields.num_detections)
outputs = {}
outputs[detection_fields.detection_boxes] = tf.identity(
boxes, name=detection_fields.detection_boxes)
outputs[detection_fields.detection_scores] = tf.identity(
scores, name=detection_fields.detection_scores)
if multiclass_scores is not None:
outputs[detection_fields.detection_multiclass_scores] = tf.identity(
multiclass_scores, name=detection_fields.detection_multiclass_scores)
if box_classifier_features is not None:
outputs[detection_fields.detection_features] = tf.identity(
box_classifier_features,
name=detection_fields.detection_features)
outputs[detection_fields.detection_classes] = tf.identity(
classes, name=detection_fields.detection_classes)
outputs[detection_fields.num_detections] = tf.identity(
num_detections, name=detection_fields.num_detections)
if raw_boxes is not None:
outputs[detection_fields.raw_detection_boxes] = tf.identity(
raw_boxes, name=detection_fields.raw_detection_boxes)
if raw_scores is not None:
outputs[detection_fields.raw_detection_scores] = tf.identity(
raw_scores, name=detection_fields.raw_detection_scores)
if keypoints is not None:
outputs[detection_fields.detection_keypoints] = tf.identity(
keypoints, name=detection_fields.detection_keypoints)
if masks is not None:
outputs[detection_fields.detection_masks] = tf.identity(
masks, name=detection_fields.detection_masks)
for output_key in outputs:
tf.add_to_collection(output_collection_name, outputs[output_key])
return outputs
def write_saved_model(saved_model_path,
frozen_graph_def,
inputs,
outputs):
"""Writes SavedModel to disk.
  The SavedModel is built from the provided frozen graph, so the weights are
  baked into the graph def and no checkpoint files are needed during
  inference. Moving averages, if any, are handled earlier, when the graph is
  frozen.
Args:
saved_model_path: Path to write SavedModel.
frozen_graph_def: tf.GraphDef holding frozen graph.
inputs: A tensor dictionary containing the inputs to a DetectionModel.
outputs: A tensor dictionary containing the outputs of a DetectionModel.
"""
with tf.Graph().as_default():
with tf.Session() as sess:
tf.import_graph_def(frozen_graph_def, name='')
builder = tf.saved_model.builder.SavedModelBuilder(saved_model_path)
tensor_info_inputs = {}
if isinstance(inputs, dict):
for k, v in inputs.items():
tensor_info_inputs[k] = tf.saved_model.utils.build_tensor_info(v)
else:
tensor_info_inputs['inputs'] = tf.saved_model.utils.build_tensor_info(
inputs)
tensor_info_outputs = {}
for k, v in outputs.items():
tensor_info_outputs[k] = tf.saved_model.utils.build_tensor_info(v)
detection_signature = (
tf.saved_model.signature_def_utils.build_signature_def(
inputs=tensor_info_inputs,
outputs=tensor_info_outputs,
method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME
))
builder.add_meta_graph_and_variables(
sess,
[tf.saved_model.tag_constants.SERVING],
signature_def_map={
tf.saved_model.signature_constants
.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
detection_signature,
},
)
builder.save()
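# Illustrative sketch, not part of the original module: how the SavedModel
# written above can be restored with the TF1 loader; the tensor names follow
# the signature built in write_saved_model().
def _example_load_saved_model(saved_model_path):
  """Hypothetical sketch of restoring the exported SavedModel."""
  with tf.Session(graph=tf.Graph()) as sess:
    meta_graph = tf.saved_model.loader.load(
        sess, [tf.saved_model.tag_constants.SERVING], saved_model_path)
    signature = meta_graph.signature_def[
        tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
    # `signature.inputs` and `signature.outputs` map the names used in
    # write_saved_model() to TensorInfo protos.
    return signature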
def write_graph_and_checkpoint(inference_graph_def,
model_path,
input_saver_def,
trained_checkpoint_prefix):
"""Writes the graph and the checkpoint into disk."""
for node in inference_graph_def.node:
node.device = ''
with tf.Graph().as_default():
tf.import_graph_def(inference_graph_def, name='')
with tf.Session() as sess:
saver = tf.train.Saver(
saver_def=input_saver_def, save_relative_paths=True)
saver.restore(sess, trained_checkpoint_prefix)
saver.save(sess, model_path)
def _get_outputs_from_inputs(input_tensors, detection_model,
output_collection_name, **side_inputs):
inputs = tf.cast(input_tensors, dtype=tf.float32)
preprocessed_inputs, true_image_shapes = detection_model.preprocess(inputs)
output_tensors = detection_model.predict(
preprocessed_inputs, true_image_shapes, **side_inputs)
postprocessed_tensors = detection_model.postprocess(
output_tensors, true_image_shapes)
return add_output_tensor_nodes(postprocessed_tensors,
output_collection_name)
def build_detection_graph(input_type, detection_model, input_shape,
output_collection_name, graph_hook_fn,
use_side_inputs=False, side_input_shapes=None,
side_input_names=None, side_input_types=None):
"""Build the detection graph."""
if input_type not in input_placeholder_fn_map:
raise ValueError('Unknown input type: {}'.format(input_type))
placeholder_args = {}
side_inputs = {}
if input_shape is not None:
if (input_type != 'image_tensor' and
input_type != 'encoded_image_string_tensor' and
input_type != 'tf_example' and
input_type != 'tf_sequence_example'):
raise ValueError('Can only specify input shape for `image_tensor`, '
'`encoded_image_string_tensor`, `tf_example`, '
                       'or `tf_sequence_example` inputs.')
placeholder_args['input_shape'] = input_shape
placeholder_tensor, input_tensors = input_placeholder_fn_map[input_type](
**placeholder_args)
placeholder_tensors = {'inputs': placeholder_tensor}
if use_side_inputs:
for idx, side_input_name in enumerate(side_input_names):
side_input_placeholder, side_input = _side_input_tensor_placeholder(
side_input_shapes[idx], side_input_name, side_input_types[idx])
print(side_input)
side_inputs[side_input_name] = side_input
placeholder_tensors[side_input_name] = side_input_placeholder
outputs = _get_outputs_from_inputs(
input_tensors=input_tensors,
detection_model=detection_model,
output_collection_name=output_collection_name,
**side_inputs)
# Add global step to the graph.
slim.get_or_create_global_step()
if graph_hook_fn: graph_hook_fn()
return outputs, placeholder_tensors
def _export_inference_graph(input_type,
detection_model,
use_moving_averages,
trained_checkpoint_prefix,
output_directory,
additional_output_tensor_names=None,
input_shape=None,
output_collection_name='inference_op',
graph_hook_fn=None,
write_inference_graph=False,
temp_checkpoint_prefix='',
use_side_inputs=False,
side_input_shapes=None,
side_input_names=None,
side_input_types=None):
"""Export helper."""
tf.gfile.MakeDirs(output_directory)
frozen_graph_path = os.path.join(output_directory,
'frozen_inference_graph.pb')
saved_model_path = os.path.join(output_directory, 'saved_model')
model_path = os.path.join(output_directory, 'model.ckpt')
outputs, placeholder_tensor_dict = build_detection_graph(
input_type=input_type,
detection_model=detection_model,
input_shape=input_shape,
output_collection_name=output_collection_name,
graph_hook_fn=graph_hook_fn,
use_side_inputs=use_side_inputs,
side_input_shapes=side_input_shapes,
side_input_names=side_input_names,
side_input_types=side_input_types)
profile_inference_graph(tf.get_default_graph())
saver_kwargs = {}
if use_moving_averages:
if not temp_checkpoint_prefix:
      # This check is to be compatible with both versions of SaverDef.
if os.path.isfile(trained_checkpoint_prefix):
saver_kwargs['write_version'] = saver_pb2.SaverDef.V1
temp_checkpoint_prefix = tempfile.NamedTemporaryFile().name
else:
temp_checkpoint_prefix = tempfile.mkdtemp()
replace_variable_values_with_moving_averages(
tf.get_default_graph(), trained_checkpoint_prefix,
temp_checkpoint_prefix)
checkpoint_to_use = temp_checkpoint_prefix
else:
checkpoint_to_use = trained_checkpoint_prefix
saver = tf.train.Saver(**saver_kwargs)
input_saver_def = saver.as_saver_def()
write_graph_and_checkpoint(
inference_graph_def=tf.get_default_graph().as_graph_def(),
model_path=model_path,
input_saver_def=input_saver_def,
trained_checkpoint_prefix=checkpoint_to_use)
if write_inference_graph:
inference_graph_def = tf.get_default_graph().as_graph_def()
inference_graph_path = os.path.join(output_directory,
'inference_graph.pbtxt')
for node in inference_graph_def.node:
node.device = ''
with tf.gfile.GFile(inference_graph_path, 'wb') as f:
f.write(str(inference_graph_def))
if additional_output_tensor_names is not None:
output_node_names = ','.join(list(outputs.keys())+(
additional_output_tensor_names))
else:
output_node_names = ','.join(outputs.keys())
frozen_graph_def = freeze_graph.freeze_graph_with_def_protos(
input_graph_def=tf.get_default_graph().as_graph_def(),
input_saver_def=input_saver_def,
input_checkpoint=checkpoint_to_use,
output_node_names=output_node_names,
restore_op_name='save/restore_all',
filename_tensor_name='save/Const:0',
output_graph=frozen_graph_path,
clear_devices=True,
initializer_nodes='')
write_saved_model(saved_model_path, frozen_graph_def,
placeholder_tensor_dict, outputs)
def export_inference_graph(input_type,
pipeline_config,
trained_checkpoint_prefix,
output_directory,
input_shape=None,
output_collection_name='inference_op',
additional_output_tensor_names=None,
write_inference_graph=False,
use_side_inputs=False,
side_input_shapes=None,
side_input_names=None,
side_input_types=None):
"""Exports inference graph for the model specified in the pipeline config.
Args:
input_type: Type of input for the graph. Can be one of ['image_tensor',
'encoded_image_string_tensor', 'tf_example'].
    pipeline_config: pipeline_pb2.TrainEvalPipelineConfig proto.
trained_checkpoint_prefix: Path to the trained checkpoint file.
output_directory: Path to write outputs.
input_shape: Sets a fixed shape for an `image_tensor` input. If not
specified, will default to [None, None, None, 3].
output_collection_name: Name of collection to add output tensors to.
If None, does not add output tensors to a collection.
additional_output_tensor_names: list of additional output
tensors to include in the frozen graph.
write_inference_graph: If true, writes inference graph to disk.
use_side_inputs: If True, the model requires side_inputs.
side_input_shapes: List of shapes of the side input tensors,
required if use_side_inputs is True.
side_input_names: List of names of the side input tensors,
required if use_side_inputs is True.
side_input_types: List of types of the side input tensors,
required if use_side_inputs is True.
"""
detection_model = model_builder.build(pipeline_config.model,
is_training=False)
graph_rewriter_fn = None
if pipeline_config.HasField('graph_rewriter'):
graph_rewriter_config = pipeline_config.graph_rewriter
graph_rewriter_fn = graph_rewriter_builder.build(graph_rewriter_config,
is_training=False)
_export_inference_graph(
input_type,
detection_model,
pipeline_config.eval_config.use_moving_averages,
trained_checkpoint_prefix,
output_directory,
additional_output_tensor_names,
input_shape,
output_collection_name,
graph_hook_fn=graph_rewriter_fn,
write_inference_graph=write_inference_graph,
use_side_inputs=use_side_inputs,
side_input_shapes=side_input_shapes,
side_input_names=side_input_names,
side_input_types=side_input_types)
pipeline_config.eval_config.use_moving_averages = False
config_util.save_pipeline_config(pipeline_config, output_directory)
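# Illustrative sketch, not part of the original module: a minimal driver for
# export_inference_graph(). The config and checkpoint paths are placeholders,
# and the imports are done locally because this module does not otherwise need
# them.
def _example_export_inference_graph():
  """Hypothetical sketch of exporting a frozen inference graph."""
  # pylint: disable=g-import-not-at-top
  from google.protobuf import text_format
  from object_detection.protos import pipeline_pb2
  # pylint: enable=g-import-not-at-top
  pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
  with tf.gfile.GFile('/path/to/pipeline.config', 'r') as f:
    text_format.Merge(f.read(), pipeline_config)
  export_inference_graph(
      input_type='image_tensor',
      pipeline_config=pipeline_config,
      trained_checkpoint_prefix='/path/to/model.ckpt-100000',
      output_directory='/path/to/exported_model')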
def profile_inference_graph(graph):
"""Profiles the inference graph.
Prints model parameters and computation FLOPs given an inference graph.
  BatchNorms are excluded from the parameter count because they are usually
  folded at inference time. BatchNorm, Initializer, Regularizer and BiasAdd
  ops are not considered in the FLOP count.
Args:
graph: the inference graph.
"""
tfprof_vars_option = (
contrib_tfprof.model_analyzer.TRAINABLE_VARS_PARAMS_STAT_OPTIONS)
tfprof_flops_option = contrib_tfprof.model_analyzer.FLOAT_OPS_OPTIONS
# Batchnorm is usually folded during inference.
tfprof_vars_option['trim_name_regexes'] = ['.*BatchNorm.*']
# Initializer and Regularizer are only used in training.
tfprof_flops_option['trim_name_regexes'] = [
'.*BatchNorm.*', '.*Initializer.*', '.*Regularizer.*', '.*BiasAdd.*'
]
contrib_tfprof.model_analyzer.print_model_analysis(
graph, tfprof_options=tfprof_vars_option)
contrib_tfprof.model_analyzer.print_model_analysis(
graph, tfprof_options=tfprof_flops_option)
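# A minimal sketch of profiling an inference graph (assumes the graph has
# already been built or imported into `inference_graph`):
#
#   with tf.Graph().as_default() as inference_graph:
#     ...  # build or import the inference graph here
#     profile_inference_graph(inference_graph)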
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/exporter.py | exporter.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object detection model library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import unittest
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection import inputs
from object_detection import model_hparams
from object_detection import model_lib
from object_detection.builders import model_builder
from object_detection.core import standard_fields as fields
from object_detection.utils import config_util
from object_detection.utils import tf_version
# Model for test. Options are:
# 'ssd_inception_v2_pets', 'faster_rcnn_resnet50_pets'
MODEL_NAME_FOR_TEST = 'ssd_inception_v2_pets'
# Model for testing keypoints.
MODEL_NAME_FOR_KEYPOINTS_TEST = 'ssd_mobilenet_v1_fpp'
# Model for testing tfSequenceExample inputs.
MODEL_NAME_FOR_SEQUENCE_EXAMPLE_TEST = 'context_rcnn_camera_trap'
def _get_data_path(model_name):
"""Returns an absolute path to TFRecord file."""
if model_name == MODEL_NAME_FOR_SEQUENCE_EXAMPLE_TEST:
return os.path.join(tf.resource_loader.get_data_files_path(), 'test_data',
'snapshot_serengeti_sequence_examples.record')
else:
return os.path.join(tf.resource_loader.get_data_files_path(), 'test_data',
'pets_examples.record')
def get_pipeline_config_path(model_name):
"""Returns path to the local pipeline config file."""
if model_name == MODEL_NAME_FOR_KEYPOINTS_TEST:
return os.path.join(tf.resource_loader.get_data_files_path(), 'test_data',
model_name + '.config')
elif model_name == MODEL_NAME_FOR_SEQUENCE_EXAMPLE_TEST:
return os.path.join(tf.resource_loader.get_data_files_path(), 'test_data',
model_name + '.config')
else:
return os.path.join(tf.resource_loader.get_data_files_path(), 'samples',
'configs', model_name + '.config')
def _get_labelmap_path():
"""Returns an absolute path to label map file."""
return os.path.join(tf.resource_loader.get_data_files_path(), 'data',
'pet_label_map.pbtxt')
def _get_keypoints_labelmap_path():
"""Returns an absolute path to label map file."""
return os.path.join(tf.resource_loader.get_data_files_path(), 'data',
'face_person_with_keypoints_label_map.pbtxt')
def _get_sequence_example_labelmap_path():
"""Returns an absolute path to label map file."""
return os.path.join(tf.resource_loader.get_data_files_path(), 'data',
'snapshot_serengeti_label_map.pbtxt')
def _get_configs_for_model(model_name):
"""Returns configurations for model."""
filename = get_pipeline_config_path(model_name)
data_path = _get_data_path(model_name)
if model_name == MODEL_NAME_FOR_KEYPOINTS_TEST:
label_map_path = _get_keypoints_labelmap_path()
elif model_name == MODEL_NAME_FOR_SEQUENCE_EXAMPLE_TEST:
label_map_path = _get_sequence_example_labelmap_path()
else:
label_map_path = _get_labelmap_path()
configs = config_util.get_configs_from_pipeline_file(filename)
override_dict = {
'train_input_path': data_path,
'eval_input_path': data_path,
'label_map_path': label_map_path
}
configs = config_util.merge_external_params_with_configs(
configs, kwargs_dict=override_dict)
return configs
def _make_initializable_iterator(dataset):
"""Creates an iterator, and initializes tables.
Args:
dataset: A `tf.data.Dataset` object.
Returns:
A `tf.data.Iterator`.
"""
iterator = tf.data.make_initializable_iterator(dataset)
tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, iterator.initializer)
return iterator
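# Usage sketch (mirrors the tests below): the iterator initializer is added to
# the TABLE_INITIALIZERS collection, so it runs whenever table initializers
# are run.
#
#   features, labels = _make_initializable_iterator(
#       inputs.create_train_input_fn(...)()).get_next()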
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class ModelLibTest(tf.test.TestCase):
@classmethod
def setUpClass(cls):
tf.reset_default_graph()
def _assert_model_fn_for_train_eval(self, configs, mode,
class_agnostic=False):
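    """Builds model_fn for `mode`, runs it, and checks the EstimatorSpec."""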
model_config = configs['model']
train_config = configs['train_config']
with tf.Graph().as_default():
if mode == 'train':
features, labels = _make_initializable_iterator(
inputs.create_train_input_fn(configs['train_config'],
configs['train_input_config'],
configs['model'])()).get_next()
model_mode = tf.estimator.ModeKeys.TRAIN
batch_size = train_config.batch_size
elif mode == 'eval':
features, labels = _make_initializable_iterator(
inputs.create_eval_input_fn(configs['eval_config'],
configs['eval_input_config'],
configs['model'])()).get_next()
model_mode = tf.estimator.ModeKeys.EVAL
batch_size = 1
elif mode == 'eval_on_train':
features, labels = _make_initializable_iterator(
inputs.create_eval_input_fn(configs['eval_config'],
configs['train_input_config'],
configs['model'])()).get_next()
model_mode = tf.estimator.ModeKeys.EVAL
batch_size = 1
detection_model_fn = functools.partial(
model_builder.build, model_config=model_config, is_training=True)
hparams = model_hparams.create_hparams(
hparams_overrides='load_pretrained=false')
model_fn = model_lib.create_model_fn(detection_model_fn, configs, hparams)
estimator_spec = model_fn(features, labels, model_mode)
self.assertIsNotNone(estimator_spec.loss)
self.assertIsNotNone(estimator_spec.predictions)
if mode == 'eval' or mode == 'eval_on_train':
if class_agnostic:
self.assertNotIn('detection_classes', estimator_spec.predictions)
else:
detection_classes = estimator_spec.predictions['detection_classes']
self.assertEqual(batch_size, detection_classes.shape.as_list()[0])
self.assertEqual(tf.float32, detection_classes.dtype)
detection_boxes = estimator_spec.predictions['detection_boxes']
detection_scores = estimator_spec.predictions['detection_scores']
num_detections = estimator_spec.predictions['num_detections']
self.assertEqual(batch_size, detection_boxes.shape.as_list()[0])
self.assertEqual(tf.float32, detection_boxes.dtype)
self.assertEqual(batch_size, detection_scores.shape.as_list()[0])
self.assertEqual(tf.float32, detection_scores.dtype)
self.assertEqual(tf.float32, num_detections.dtype)
if mode == 'eval':
self.assertIn('Detections_Left_Groundtruth_Right/0',
estimator_spec.eval_metric_ops)
if model_mode == tf.estimator.ModeKeys.TRAIN:
self.assertIsNotNone(estimator_spec.train_op)
return estimator_spec
def _assert_model_fn_for_predict(self, configs):
model_config = configs['model']
with tf.Graph().as_default():
features, _ = _make_initializable_iterator(
inputs.create_eval_input_fn(configs['eval_config'],
configs['eval_input_config'],
configs['model'])()).get_next()
detection_model_fn = functools.partial(
model_builder.build, model_config=model_config, is_training=False)
hparams = model_hparams.create_hparams(
hparams_overrides='load_pretrained=false')
model_fn = model_lib.create_model_fn(detection_model_fn, configs, hparams)
estimator_spec = model_fn(features, None, tf.estimator.ModeKeys.PREDICT)
self.assertIsNone(estimator_spec.loss)
self.assertIsNone(estimator_spec.train_op)
self.assertIsNotNone(estimator_spec.predictions)
self.assertIsNotNone(estimator_spec.export_outputs)
self.assertIn(tf.saved_model.signature_constants.PREDICT_METHOD_NAME,
estimator_spec.export_outputs)
def test_model_fn_in_train_mode(self):
"""Tests the model function in TRAIN mode."""
configs = _get_configs_for_model(MODEL_NAME_FOR_TEST)
self._assert_model_fn_for_train_eval(configs, 'train')
def test_model_fn_in_train_mode_sequences(self):
"""Tests the model function in TRAIN mode."""
configs = _get_configs_for_model(MODEL_NAME_FOR_SEQUENCE_EXAMPLE_TEST)
self._assert_model_fn_for_train_eval(configs, 'train')
def test_model_fn_in_train_mode_freeze_all_variables(self):
"""Tests model_fn TRAIN mode with all variables frozen."""
configs = _get_configs_for_model(MODEL_NAME_FOR_TEST)
configs['train_config'].freeze_variables.append('.*')
with self.assertRaisesRegexp(ValueError, 'No variables to optimize'):
self._assert_model_fn_for_train_eval(configs, 'train')
def test_model_fn_in_train_mode_freeze_all_included_variables(self):
"""Tests model_fn TRAIN mode with all included variables frozen."""
configs = _get_configs_for_model(MODEL_NAME_FOR_TEST)
train_config = configs['train_config']
train_config.update_trainable_variables.append('FeatureExtractor')
train_config.freeze_variables.append('.*')
with self.assertRaisesRegexp(ValueError, 'No variables to optimize'):
self._assert_model_fn_for_train_eval(configs, 'train')
def test_model_fn_in_train_mode_freeze_box_predictor(self):
"""Tests model_fn TRAIN mode with FeatureExtractor variables frozen."""
configs = _get_configs_for_model(MODEL_NAME_FOR_TEST)
train_config = configs['train_config']
train_config.update_trainable_variables.append('FeatureExtractor')
train_config.update_trainable_variables.append('BoxPredictor')
train_config.freeze_variables.append('FeatureExtractor')
self._assert_model_fn_for_train_eval(configs, 'train')
def test_model_fn_in_eval_mode(self):
"""Tests the model function in EVAL mode."""
configs = _get_configs_for_model(MODEL_NAME_FOR_TEST)
self._assert_model_fn_for_train_eval(configs, 'eval')
def test_model_fn_in_eval_mode_sequences(self):
"""Tests the model function in EVAL mode."""
configs = _get_configs_for_model(MODEL_NAME_FOR_SEQUENCE_EXAMPLE_TEST)
self._assert_model_fn_for_train_eval(configs, 'eval')
def test_model_fn_in_keypoints_eval_mode(self):
"""Tests the model function in EVAL mode with keypoints config."""
configs = _get_configs_for_model(MODEL_NAME_FOR_KEYPOINTS_TEST)
estimator_spec = self._assert_model_fn_for_train_eval(configs, 'eval')
metric_ops = estimator_spec.eval_metric_ops
self.assertIn('Keypoints_Precision/mAP ByCategory/face', metric_ops)
self.assertIn('Keypoints_Precision/mAP ByCategory/PERSON', metric_ops)
detection_keypoints = estimator_spec.predictions['detection_keypoints']
self.assertEqual(1, detection_keypoints.shape.as_list()[0])
self.assertEqual(tf.float32, detection_keypoints.dtype)
def test_model_fn_in_eval_on_train_mode(self):
"""Tests the model function in EVAL mode with train data."""
configs = _get_configs_for_model(MODEL_NAME_FOR_TEST)
self._assert_model_fn_for_train_eval(configs, 'eval_on_train')
def test_model_fn_in_predict_mode(self):
"""Tests the model function in PREDICT mode."""
configs = _get_configs_for_model(MODEL_NAME_FOR_TEST)
self._assert_model_fn_for_predict(configs)
def test_create_estimator_and_inputs(self):
"""Tests that Estimator and input function are constructed correctly."""
run_config = tf.estimator.RunConfig()
hparams = model_hparams.create_hparams(
hparams_overrides='load_pretrained=false')
pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST)
train_steps = 20
train_and_eval_dict = model_lib.create_estimator_and_inputs(
run_config,
hparams,
pipeline_config_path,
train_steps=train_steps)
estimator = train_and_eval_dict['estimator']
train_steps = train_and_eval_dict['train_steps']
self.assertIsInstance(estimator, tf.estimator.Estimator)
self.assertEqual(20, train_steps)
self.assertIn('train_input_fn', train_and_eval_dict)
self.assertIn('eval_input_fns', train_and_eval_dict)
self.assertIn('eval_on_train_input_fn', train_and_eval_dict)
def test_create_estimator_and_inputs_sequence_example(self):
"""Tests that Estimator and input function are constructed correctly."""
run_config = tf.estimator.RunConfig()
hparams = model_hparams.create_hparams(
hparams_overrides='load_pretrained=false')
pipeline_config_path = get_pipeline_config_path(
MODEL_NAME_FOR_SEQUENCE_EXAMPLE_TEST)
train_steps = 20
train_and_eval_dict = model_lib.create_estimator_and_inputs(
run_config,
hparams,
pipeline_config_path,
train_steps=train_steps)
estimator = train_and_eval_dict['estimator']
train_steps = train_and_eval_dict['train_steps']
self.assertIsInstance(estimator, tf.estimator.Estimator)
self.assertEqual(20, train_steps)
self.assertIn('train_input_fn', train_and_eval_dict)
self.assertIn('eval_input_fns', train_and_eval_dict)
self.assertIn('eval_on_train_input_fn', train_and_eval_dict)
def test_create_estimator_with_default_train_eval_steps(self):
"""Tests that number of train/eval defaults to config values."""
run_config = tf.estimator.RunConfig()
hparams = model_hparams.create_hparams(
hparams_overrides='load_pretrained=false')
pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
config_train_steps = configs['train_config'].num_steps
train_and_eval_dict = model_lib.create_estimator_and_inputs(
run_config, hparams, pipeline_config_path)
estimator = train_and_eval_dict['estimator']
train_steps = train_and_eval_dict['train_steps']
self.assertIsInstance(estimator, tf.estimator.Estimator)
self.assertEqual(config_train_steps, train_steps)
def test_create_tpu_estimator_and_inputs(self):
"""Tests that number of train/eval defaults to config values."""
run_config = tf.estimator.tpu.RunConfig()
hparams = model_hparams.create_hparams(
hparams_overrides='load_pretrained=false')
pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST)
train_steps = 20
train_and_eval_dict = model_lib.create_estimator_and_inputs(
run_config,
hparams,
pipeline_config_path,
train_steps=train_steps,
use_tpu_estimator=True)
estimator = train_and_eval_dict['estimator']
train_steps = train_and_eval_dict['train_steps']
self.assertIsInstance(estimator, tf.estimator.tpu.TPUEstimator)
self.assertEqual(20, train_steps)
def test_create_train_and_eval_specs(self):
"""Tests that `TrainSpec` and `EvalSpec` is created correctly."""
run_config = tf.estimator.RunConfig()
hparams = model_hparams.create_hparams(
hparams_overrides='load_pretrained=false')
pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST)
train_steps = 20
train_and_eval_dict = model_lib.create_estimator_and_inputs(
run_config,
hparams,
pipeline_config_path,
train_steps=train_steps)
train_input_fn = train_and_eval_dict['train_input_fn']
eval_input_fns = train_and_eval_dict['eval_input_fns']
eval_on_train_input_fn = train_and_eval_dict['eval_on_train_input_fn']
predict_input_fn = train_and_eval_dict['predict_input_fn']
train_steps = train_and_eval_dict['train_steps']
train_spec, eval_specs = model_lib.create_train_and_eval_specs(
train_input_fn,
eval_input_fns,
eval_on_train_input_fn,
predict_input_fn,
train_steps,
eval_on_train_data=True,
final_exporter_name='exporter',
eval_spec_names=['holdout'])
self.assertEqual(train_steps, train_spec.max_steps)
self.assertEqual(2, len(eval_specs))
self.assertEqual(None, eval_specs[0].steps)
self.assertEqual('holdout', eval_specs[0].name)
self.assertEqual('exporter', eval_specs[0].exporters[0].name)
self.assertEqual(None, eval_specs[1].steps)
self.assertEqual('eval_on_train', eval_specs[1].name)
def test_experiment(self):
"""Tests that the `Experiment` object is constructed correctly."""
run_config = tf.estimator.RunConfig()
hparams = model_hparams.create_hparams(
hparams_overrides='load_pretrained=false')
pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST)
experiment = model_lib.populate_experiment(
run_config,
hparams,
pipeline_config_path,
train_steps=10,
eval_steps=20)
self.assertEqual(10, experiment.train_steps)
self.assertEqual(None, experiment.eval_steps)
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class UnbatchTensorsTest(tf.test.TestCase):
def test_unbatch_without_unpadding(self):
image_placeholder = tf.placeholder(tf.float32, [2, None, None, None])
groundtruth_boxes_placeholder = tf.placeholder(tf.float32, [2, None, None])
groundtruth_classes_placeholder = tf.placeholder(tf.float32,
[2, None, None])
groundtruth_weights_placeholder = tf.placeholder(tf.float32, [2, None])
tensor_dict = {
fields.InputDataFields.image:
image_placeholder,
fields.InputDataFields.groundtruth_boxes:
groundtruth_boxes_placeholder,
fields.InputDataFields.groundtruth_classes:
groundtruth_classes_placeholder,
fields.InputDataFields.groundtruth_weights:
groundtruth_weights_placeholder
}
unbatched_tensor_dict = model_lib.unstack_batch(
tensor_dict, unpad_groundtruth_tensors=False)
with self.test_session() as sess:
unbatched_tensor_dict_out = sess.run(
unbatched_tensor_dict,
feed_dict={
image_placeholder:
np.random.rand(2, 4, 4, 3).astype(np.float32),
groundtruth_boxes_placeholder:
np.random.rand(2, 5, 4).astype(np.float32),
groundtruth_classes_placeholder:
np.random.rand(2, 5, 6).astype(np.float32),
groundtruth_weights_placeholder:
np.random.rand(2, 5).astype(np.float32)
})
for image_out in unbatched_tensor_dict_out[fields.InputDataFields.image]:
self.assertAllEqual(image_out.shape, [4, 4, 3])
for groundtruth_boxes_out in unbatched_tensor_dict_out[
fields.InputDataFields.groundtruth_boxes]:
self.assertAllEqual(groundtruth_boxes_out.shape, [5, 4])
for groundtruth_classes_out in unbatched_tensor_dict_out[
fields.InputDataFields.groundtruth_classes]:
self.assertAllEqual(groundtruth_classes_out.shape, [5, 6])
for groundtruth_weights_out in unbatched_tensor_dict_out[
fields.InputDataFields.groundtruth_weights]:
self.assertAllEqual(groundtruth_weights_out.shape, [5])
def test_unbatch_and_unpad_groundtruth_tensors(self):
image_placeholder = tf.placeholder(tf.float32, [2, None, None, None])
groundtruth_boxes_placeholder = tf.placeholder(tf.float32, [2, 5, None])
groundtruth_classes_placeholder = tf.placeholder(tf.float32, [2, 5, None])
groundtruth_weights_placeholder = tf.placeholder(tf.float32, [2, 5])
num_groundtruth_placeholder = tf.placeholder(tf.int32, [2])
tensor_dict = {
fields.InputDataFields.image:
image_placeholder,
fields.InputDataFields.groundtruth_boxes:
groundtruth_boxes_placeholder,
fields.InputDataFields.groundtruth_classes:
groundtruth_classes_placeholder,
fields.InputDataFields.groundtruth_weights:
groundtruth_weights_placeholder,
fields.InputDataFields.num_groundtruth_boxes:
num_groundtruth_placeholder
}
unbatched_tensor_dict = model_lib.unstack_batch(
tensor_dict, unpad_groundtruth_tensors=True)
with self.test_session() as sess:
unbatched_tensor_dict_out = sess.run(
unbatched_tensor_dict,
feed_dict={
image_placeholder:
np.random.rand(2, 4, 4, 3).astype(np.float32),
groundtruth_boxes_placeholder:
np.random.rand(2, 5, 4).astype(np.float32),
groundtruth_classes_placeholder:
np.random.rand(2, 5, 6).astype(np.float32),
groundtruth_weights_placeholder:
np.random.rand(2, 5).astype(np.float32),
num_groundtruth_placeholder:
np.array([3, 3], np.int32)
})
for image_out in unbatched_tensor_dict_out[fields.InputDataFields.image]:
self.assertAllEqual(image_out.shape, [4, 4, 3])
for groundtruth_boxes_out in unbatched_tensor_dict_out[
fields.InputDataFields.groundtruth_boxes]:
self.assertAllEqual(groundtruth_boxes_out.shape, [3, 4])
for groundtruth_classes_out in unbatched_tensor_dict_out[
fields.InputDataFields.groundtruth_classes]:
self.assertAllEqual(groundtruth_classes_out.shape, [3, 6])
for groundtruth_weights_out in unbatched_tensor_dict_out[
fields.InputDataFields.groundtruth_weights]:
self.assertAllEqual(groundtruth_weights_out.shape, [3])
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/model_lib_tf1_test.py | model_lib_tf1_test.py |
# Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.export_tflite_ssd_graph."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import unittest
import numpy as np
import six
import tensorflow.compat.v1 as tf
import tf_slim as slim
from tensorflow.core.framework import types_pb2
from object_detection import export_tflite_ssd_graph_lib
from object_detection import exporter
from object_detection.builders import graph_rewriter_builder
from object_detection.builders import model_builder
from object_detection.core import model
from object_detection.protos import graph_rewriter_pb2
from object_detection.protos import pipeline_pb2
from object_detection.protos import post_processing_pb2
from object_detection.utils import tf_version
# pylint: disable=g-import-not-at-top
if six.PY2:
import mock
else:
from unittest import mock # pylint: disable=g-importing-member
# pylint: enable=g-import-not-at-top
class FakeModel(model.DetectionModel):
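  """Fake detection model that returns constant predictions for testing."""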
def __init__(self, add_detection_masks=False):
self._add_detection_masks = add_detection_masks
def preprocess(self, inputs):
pass
def predict(self, preprocessed_inputs, true_image_shapes):
features = slim.conv2d(preprocessed_inputs, 3, 1)
with tf.control_dependencies([features]):
prediction_tensors = {
'box_encodings':
tf.constant([[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]]],
tf.float32),
'class_predictions_with_background':
tf.constant([[[0.7, 0.6], [0.9, 0.0]]], tf.float32),
}
with tf.control_dependencies(
[tf.convert_to_tensor(features.get_shape().as_list()[1:3])]):
prediction_tensors['anchors'] = tf.constant(
[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 1.0]], tf.float32)
return prediction_tensors
def postprocess(self, prediction_tensors, true_image_shapes):
pass
def restore_map(self, checkpoint_path, from_detection_checkpoint):
pass
def restore_from_objects(self, fine_tune_checkpoint_type):
pass
def loss(self, prediction_dict, true_image_shapes):
pass
def regularization_losses(self):
pass
def updates(self):
pass
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class ExportTfliteGraphTest(tf.test.TestCase):
def _save_checkpoint_from_mock_model(self,
checkpoint_path,
use_moving_averages,
quantize=False,
num_channels=3):
g = tf.Graph()
with g.as_default():
mock_model = FakeModel()
inputs = tf.placeholder(tf.float32, shape=[1, 10, 10, num_channels])
mock_model.predict(inputs, true_image_shapes=None)
if use_moving_averages:
tf.train.ExponentialMovingAverage(0.0).apply()
tf.train.get_or_create_global_step()
if quantize:
graph_rewriter_config = graph_rewriter_pb2.GraphRewriter()
graph_rewriter_config.quantization.delay = 500000
graph_rewriter_fn = graph_rewriter_builder.build(
graph_rewriter_config, is_training=False)
graph_rewriter_fn()
saver = tf.train.Saver()
init = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init)
saver.save(sess, checkpoint_path)
def _assert_quant_vars_exists(self, tflite_graph_file):
with tf.gfile.Open(tflite_graph_file, mode='rb') as f:
graph_string = f.read()
print(graph_string)
self.assertIn(six.ensure_binary('quant'), graph_string)
def _import_graph_and_run_inference(self, tflite_graph_file, num_channels=3):
"""Imports a tflite graph, runs single inference and returns outputs."""
graph = tf.Graph()
with graph.as_default():
graph_def = tf.GraphDef()
with tf.gfile.Open(tflite_graph_file, mode='rb') as f:
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name='')
input_tensor = graph.get_tensor_by_name('normalized_input_image_tensor:0')
box_encodings = graph.get_tensor_by_name('raw_outputs/box_encodings:0')
class_predictions = graph.get_tensor_by_name(
'raw_outputs/class_predictions:0')
with self.test_session(graph) as sess:
[box_encodings_np, class_predictions_np] = sess.run(
[box_encodings, class_predictions],
feed_dict={input_tensor: np.random.rand(1, 10, 10, num_channels)})
return box_encodings_np, class_predictions_np
def _export_graph(self,
pipeline_config,
num_channels=3,
additional_output_tensors=()):
"""Exports a tflite graph."""
output_dir = self.get_temp_dir()
trained_checkpoint_prefix = os.path.join(output_dir, 'model.ckpt')
tflite_graph_file = os.path.join(output_dir, 'tflite_graph.pb')
quantize = pipeline_config.HasField('graph_rewriter')
self._save_checkpoint_from_mock_model(
trained_checkpoint_prefix,
use_moving_averages=pipeline_config.eval_config.use_moving_averages,
quantize=quantize,
num_channels=num_channels)
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel()
with tf.Graph().as_default():
tf.identity(
tf.constant([[1, 2], [3, 4]], tf.uint8), name='UnattachedTensor')
export_tflite_ssd_graph_lib.export_tflite_graph(
pipeline_config=pipeline_config,
trained_checkpoint_prefix=trained_checkpoint_prefix,
output_dir=output_dir,
add_postprocessing_op=False,
max_detections=10,
max_classes_per_detection=1,
additional_output_tensors=additional_output_tensors)
return tflite_graph_file
def _export_graph_with_postprocessing_op(self,
pipeline_config,
num_channels=3,
additional_output_tensors=()):
"""Exports a tflite graph with custom postprocessing op."""
output_dir = self.get_temp_dir()
trained_checkpoint_prefix = os.path.join(output_dir, 'model.ckpt')
tflite_graph_file = os.path.join(output_dir, 'tflite_graph.pb')
quantize = pipeline_config.HasField('graph_rewriter')
self._save_checkpoint_from_mock_model(
trained_checkpoint_prefix,
use_moving_averages=pipeline_config.eval_config.use_moving_averages,
quantize=quantize,
num_channels=num_channels)
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel()
with tf.Graph().as_default():
tf.identity(
tf.constant([[1, 2], [3, 4]], tf.uint8), name='UnattachedTensor')
export_tflite_ssd_graph_lib.export_tflite_graph(
pipeline_config=pipeline_config,
trained_checkpoint_prefix=trained_checkpoint_prefix,
output_dir=output_dir,
add_postprocessing_op=True,
max_detections=10,
max_classes_per_detection=1,
additional_output_tensors=additional_output_tensors)
return tflite_graph_file
def test_export_tflite_graph_with_moving_averages(self):
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = True
pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10
pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10
pipeline_config.model.ssd.num_classes = 2
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.y_scale = 10.0
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.x_scale = 10.0
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.height_scale = 5.0
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.width_scale = 5.0
tflite_graph_file = self._export_graph(pipeline_config)
self.assertTrue(os.path.exists(tflite_graph_file))
(box_encodings_np, class_predictions_np
) = self._import_graph_and_run_inference(tflite_graph_file)
self.assertAllClose(box_encodings_np,
[[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]]])
self.assertAllClose(class_predictions_np, [[[0.7, 0.6], [0.9, 0.0]]])
def test_export_tflite_graph_without_moving_averages(self):
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = False
pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10
pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10
pipeline_config.model.ssd.num_classes = 2
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.y_scale = 10.0
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.x_scale = 10.0
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.height_scale = 5.0
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.width_scale = 5.0
tflite_graph_file = self._export_graph(pipeline_config)
self.assertTrue(os.path.exists(tflite_graph_file))
(box_encodings_np, class_predictions_np
) = self._import_graph_and_run_inference(tflite_graph_file)
self.assertAllClose(box_encodings_np,
[[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]]])
self.assertAllClose(class_predictions_np, [[[0.7, 0.6], [0.9, 0.0]]])
def test_export_tflite_graph_grayscale(self):
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = False
pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10
pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10
(pipeline_config.model.ssd.image_resizer.fixed_shape_resizer
).convert_to_grayscale = True
pipeline_config.model.ssd.num_classes = 2
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.y_scale = 10.0
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.x_scale = 10.0
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.height_scale = 5.0
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.width_scale = 5.0
tflite_graph_file = self._export_graph(pipeline_config, num_channels=1)
self.assertTrue(os.path.exists(tflite_graph_file))
(box_encodings_np,
class_predictions_np) = self._import_graph_and_run_inference(
tflite_graph_file, num_channels=1)
self.assertAllClose(box_encodings_np,
[[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]]])
self.assertAllClose(class_predictions_np, [[[0.7, 0.6], [0.9, 0.0]]])
def test_export_tflite_graph_with_quantization(self):
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = False
pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10
pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10
pipeline_config.graph_rewriter.quantization.delay = 500000
pipeline_config.model.ssd.num_classes = 2
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.y_scale = 10.0
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.x_scale = 10.0
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.height_scale = 5.0
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.width_scale = 5.0
tflite_graph_file = self._export_graph(pipeline_config)
self.assertTrue(os.path.exists(tflite_graph_file))
self._assert_quant_vars_exists(tflite_graph_file)
(box_encodings_np, class_predictions_np
) = self._import_graph_and_run_inference(tflite_graph_file)
self.assertAllClose(box_encodings_np,
[[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]]])
self.assertAllClose(class_predictions_np, [[[0.7, 0.6], [0.9, 0.0]]])
def test_export_tflite_graph_with_softmax_score_conversion(self):
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = False
pipeline_config.model.ssd.post_processing.score_converter = (
post_processing_pb2.PostProcessing.SOFTMAX)
pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10
pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10
pipeline_config.model.ssd.num_classes = 2
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.y_scale = 10.0
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.x_scale = 10.0
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.height_scale = 5.0
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.width_scale = 5.0
tflite_graph_file = self._export_graph(pipeline_config)
self.assertTrue(os.path.exists(tflite_graph_file))
(box_encodings_np, class_predictions_np
) = self._import_graph_and_run_inference(tflite_graph_file)
self.assertAllClose(box_encodings_np,
[[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]]])
self.assertAllClose(class_predictions_np,
[[[0.524979, 0.475021], [0.710949, 0.28905]]])
def test_export_tflite_graph_with_sigmoid_score_conversion(self):
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = False
pipeline_config.model.ssd.post_processing.score_converter = (
post_processing_pb2.PostProcessing.SIGMOID)
pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10
pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10
pipeline_config.model.ssd.num_classes = 2
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.y_scale = 10.0
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.x_scale = 10.0
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.height_scale = 5.0
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.width_scale = 5.0
tflite_graph_file = self._export_graph(pipeline_config)
self.assertTrue(os.path.exists(tflite_graph_file))
(box_encodings_np, class_predictions_np
) = self._import_graph_and_run_inference(tflite_graph_file)
self.assertAllClose(box_encodings_np,
[[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]]])
self.assertAllClose(class_predictions_np,
[[[0.668188, 0.645656], [0.710949, 0.5]]])
def test_export_tflite_graph_with_postprocessing_op(self):
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = False
pipeline_config.model.ssd.post_processing.score_converter = (
post_processing_pb2.PostProcessing.SIGMOID)
pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10
pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10
pipeline_config.model.ssd.num_classes = 2
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.y_scale = 10.0
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.x_scale = 10.0
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.height_scale = 5.0
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.width_scale = 5.0
tflite_graph_file = self._export_graph_with_postprocessing_op(
pipeline_config)
self.assertTrue(os.path.exists(tflite_graph_file))
graph = tf.Graph()
with graph.as_default():
graph_def = tf.GraphDef()
with tf.gfile.Open(tflite_graph_file, mode='rb') as f:
graph_def.ParseFromString(f.read())
all_op_names = [node.name for node in graph_def.node]
self.assertIn('TFLite_Detection_PostProcess', all_op_names)
self.assertNotIn('UnattachedTensor', all_op_names)
for node in graph_def.node:
if node.name == 'TFLite_Detection_PostProcess':
self.assertTrue(node.attr['_output_quantized'].b)
self.assertTrue(
node.attr['_support_output_type_float_in_quantized_op'].b)
self.assertEqual(node.attr['y_scale'].f, 10.0)
self.assertEqual(node.attr['x_scale'].f, 10.0)
self.assertEqual(node.attr['h_scale'].f, 5.0)
self.assertEqual(node.attr['w_scale'].f, 5.0)
self.assertEqual(node.attr['num_classes'].i, 2)
self.assertTrue(
all([
t == types_pb2.DT_FLOAT
for t in node.attr['_output_types'].list.type
]))
def test_export_tflite_graph_with_additional_tensors(self):
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = False
pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10
pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10
tflite_graph_file = self._export_graph(
pipeline_config, additional_output_tensors=['UnattachedTensor'])
self.assertTrue(os.path.exists(tflite_graph_file))
graph = tf.Graph()
with graph.as_default():
graph_def = tf.GraphDef()
with tf.gfile.Open(tflite_graph_file, mode='rb') as f:
graph_def.ParseFromString(f.read())
all_op_names = [node.name for node in graph_def.node]
self.assertIn('UnattachedTensor', all_op_names)
def test_export_tflite_graph_with_postprocess_op_and_additional_tensors(self):
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = False
pipeline_config.model.ssd.post_processing.score_converter = (
post_processing_pb2.PostProcessing.SIGMOID)
pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10
pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10
pipeline_config.model.ssd.num_classes = 2
tflite_graph_file = self._export_graph_with_postprocessing_op(
pipeline_config, additional_output_tensors=['UnattachedTensor'])
self.assertTrue(os.path.exists(tflite_graph_file))
graph = tf.Graph()
with graph.as_default():
graph_def = tf.GraphDef()
with tf.gfile.Open(tflite_graph_file, mode='rb') as f:
graph_def.ParseFromString(f.read())
all_op_names = [node.name for node in graph_def.node]
self.assertIn('TFLite_Detection_PostProcess', all_op_names)
self.assertIn('UnattachedTensor', all_op_names)
@mock.patch.object(exporter, 'rewrite_nn_resize_op')
def test_export_with_nn_resize_op_not_called_without_fpn(self, mock_get):
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10
pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10
tflite_graph_file = self._export_graph_with_postprocessing_op(
pipeline_config)
self.assertTrue(os.path.exists(tflite_graph_file))
mock_get.assert_not_called()
@mock.patch.object(exporter, 'rewrite_nn_resize_op')
def test_export_with_nn_resize_op_called_with_fpn(self, mock_get):
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10
pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10
pipeline_config.model.ssd.feature_extractor.fpn.min_level = 3
pipeline_config.model.ssd.feature_extractor.fpn.max_level = 7
tflite_graph_file = self._export_graph_with_postprocessing_op(
pipeline_config)
self.assertTrue(os.path.exists(tflite_graph_file))
self.assertEqual(1, mock_get.call_count)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/export_tflite_ssd_graph_lib_tf1_test.py | export_tflite_ssd_graph_lib_tf1_test.py |
# Lint as: python2, python3
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Tool to export an object detection model for inference.
Prepares an object detection TensorFlow graph for inference using the model
configuration and a trained checkpoint. Outputs an inference graph,
associated checkpoint files, a frozen inference graph and a
SavedModel (https://tensorflow.github.io/serving/serving_basic.html).
The inference graph contains one of three input nodes depending on the user
specified option.
* `image_tensor`: Accepts a uint8 4-D tensor of shape [None, None, None, 3]
* `encoded_image_string_tensor`: Accepts a 1-D string tensor of shape [None]
containing encoded PNG or JPEG images. Image resolutions are expected to be
the same if more than 1 image is provided.
* `tf_example`: Accepts a 1-D string tensor of shape [None] containing
serialized TFExample protos. Image resolutions are expected to be the same
if more than 1 image is provided.
and the following output nodes returned by the model.postprocess(..):
* `num_detections`: Outputs float32 tensors of the form [batch]
that specifies the number of valid boxes per image in the batch.
* `detection_boxes`: Outputs float32 tensors of the form
[batch, num_boxes, 4] containing detected boxes.
* `detection_scores`: Outputs float32 tensors of the form
[batch, num_boxes] containing class scores for the detections.
* `detection_classes`: Outputs float32 tensors of the form
[batch, num_boxes] containing classes for the detections.
* `raw_detection_boxes`: Outputs float32 tensors of the form
[batch, raw_num_boxes, 4] containing detection boxes without
post-processing.
* `raw_detection_scores`: Outputs float32 tensors of the form
[batch, raw_num_boxes, num_classes_with_background] containing class score
logits for raw detection boxes.
* `detection_masks`: (Optional) Outputs float32 tensors of the form
[batch, num_boxes, mask_height, mask_width] containing predicted instance
masks for each box if its present in the dictionary of postprocessed
tensors returned by the model.
  * `detection_multiclass_scores`: (Optional) Outputs float32 tensors of the
    form [batch, num_boxes, num_classes_with_background] containing the class
    score distribution for detected boxes, including background if any.
  * `detection_features`: (Optional) Outputs float32 tensors of the form
    [batch, num_boxes, roi_height, roi_width, depth]
    containing classifier features.
Notes:
* This tool uses `use_moving_averages` from eval_config to decide which
weights to freeze.
Example Usage:
--------------
python export_inference_graph.py \
--input_type image_tensor \
--pipeline_config_path path/to/ssd_inception_v2.config \
--trained_checkpoint_prefix path/to/model.ckpt \
--output_directory path/to/exported_model_directory
The expected output would be in the directory
path/to/exported_model_directory (which is created if it does not exist)
with contents:
- inference_graph.pbtxt
- model.ckpt.data-00000-of-00001
 - model.ckpt.index
- model.ckpt.meta
- frozen_inference_graph.pb
+ saved_model (a directory)
Config overrides (see the `config_override` flag) are text protobufs
(also of type pipeline_pb2.TrainEvalPipelineConfig) which are used to override
certain fields in the provided pipeline_config_path. These are useful for
making small changes to the inference graph that differ from the training or
eval config.
Example Usage (in which we change the second stage post-processing score
threshold to be 0.5):
python export_inference_graph.py \
--input_type image_tensor \
--pipeline_config_path path/to/ssd_inception_v2.config \
--trained_checkpoint_prefix path/to/model.ckpt \
--output_directory path/to/exported_model_directory \
--config_override " \
model{ \
faster_rcnn { \
second_stage_post_processing { \
batch_non_max_suppression { \
score_threshold: 0.5 \
} \
} \
} \
}"
"""
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection import exporter
from object_detection.protos import pipeline_pb2
flags = tf.app.flags
flags.DEFINE_string('input_type', 'image_tensor', 'Type of input node. Can be '
'one of [`image_tensor`, `encoded_image_string_tensor`, '
'`tf_example`]')
flags.DEFINE_string('input_shape', None,
'If input_type is `image_tensor`, this can explicitly set '
'the shape of this input tensor to a fixed size. The '
'dimensions are to be provided as a comma-separated list '
'of integers. A value of -1 can be used for unknown '
                    'dimensions. If not specified, for an `image_tensor`, the '
'default shape will be partially specified as '
'`[None, None, None, 3]`.')
flags.DEFINE_string('pipeline_config_path', None,
'Path to a pipeline_pb2.TrainEvalPipelineConfig config '
'file.')
flags.DEFINE_string('trained_checkpoint_prefix', None,
'Path to trained checkpoint, typically of the form '
'path/to/model.ckpt')
flags.DEFINE_string('output_directory', None, 'Path to write outputs.')
flags.DEFINE_string('config_override', '',
'pipeline_pb2.TrainEvalPipelineConfig '
'text proto to override pipeline_config_path.')
flags.DEFINE_boolean('write_inference_graph', False,
'If true, writes inference graph to disk.')
flags.DEFINE_string('additional_output_tensor_names', None,
'Additional Tensors to output, to be specified as a comma '
'separated list of tensor names.')
flags.DEFINE_boolean('use_side_inputs', False,
'If True, uses side inputs as well as image inputs.')
flags.DEFINE_string('side_input_shapes', None,
'If use_side_inputs is True, this explicitly sets '
'the shape of the side input tensors to a fixed size. The '
'dimensions are to be provided as a comma-separated list '
'of integers. A value of -1 can be used for unknown '
'dimensions. A `/` denotes a break, starting the shape of '
'the next side input tensor. This flag is required if '
'using side inputs.')
flags.DEFINE_string('side_input_types', None,
'If use_side_inputs is True, this explicitly sets '
'the type of the side input tensors. The '
'dimensions are to be provided as a comma-separated list '
'of types, each of `string`, `integer`, or `float`. '
'This flag is required if using side inputs.')
flags.DEFINE_string('side_input_names', None,
'If use_side_inputs is True, this explicitly sets '
'the names of the side input tensors required by the model '
'assuming the names will be a comma-separated list of '
'strings. This flag is required if using side inputs.')
tf.app.flags.mark_flag_as_required('pipeline_config_path')
tf.app.flags.mark_flag_as_required('trained_checkpoint_prefix')
tf.app.flags.mark_flag_as_required('output_directory')
FLAGS = flags.FLAGS
def main(_):
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
with tf.gfile.GFile(FLAGS.pipeline_config_path, 'r') as f:
text_format.Merge(f.read(), pipeline_config)
text_format.Merge(FLAGS.config_override, pipeline_config)
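  # Example (hypothetical flag value): --input_shape=1,-1,-1,3 is parsed below
  # into [1, None, None, 3]; a value of -1 marks an unknown dimension.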
if FLAGS.input_shape:
input_shape = [
int(dim) if dim != '-1' else None
for dim in FLAGS.input_shape.split(',')
]
else:
input_shape = None
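  # Hypothetical side-input flag values, in the format described by the flag
  # help strings above:
  #   --side_input_shapes=1,2000,2057/1
  #   --side_input_names=context_features,valid_context_size
  #   --side_input_types=float,integer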
if FLAGS.use_side_inputs:
side_input_shapes, side_input_names, side_input_types = (
exporter.parse_side_inputs(
FLAGS.side_input_shapes,
FLAGS.side_input_names,
FLAGS.side_input_types))
else:
side_input_shapes = None
side_input_names = None
side_input_types = None
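  # A comma-separated value such as 'detection_features,detection_masks'
  # (hypothetical names) is split into a list of tensor names below.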
if FLAGS.additional_output_tensor_names:
additional_output_tensor_names = list(
FLAGS.additional_output_tensor_names.split(','))
else:
additional_output_tensor_names = None
exporter.export_inference_graph(
FLAGS.input_type, pipeline_config, FLAGS.trained_checkpoint_prefix,
FLAGS.output_directory, input_shape=input_shape,
write_inference_graph=FLAGS.write_inference_graph,
additional_output_tensor_names=additional_output_tensor_names,
use_side_inputs=FLAGS.use_side_inputs,
side_input_shapes=side_input_shapes,
side_input_names=side_input_names,
side_input_types=side_input_types)
if __name__ == '__main__':
tf.app.run()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/export_inference_graph.py | export_inference_graph.py |
# Lint as: python2, python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Tool to export an object detection model for inference.
Prepares an object detection TensorFlow graph for inference using the model
configuration and a trained checkpoint. Outputs associated checkpoint files,
a SavedModel, and a copy of the model config.
The inference graph contains one of three input nodes depending on the user
specified option.
* `image_tensor`: Accepts a uint8 4-D tensor of shape [1, None, None, 3]
* `float_image_tensor`: Accepts a float32 4-D tensor of shape
[1, None, None, 3]
* `encoded_image_string_tensor`: Accepts a 1-D string tensor of shape [None]
containing encoded PNG or JPEG images. Image resolutions are expected to be
the same if more than 1 image is provided.
* `tf_example`: Accepts a 1-D string tensor of shape [None] containing
serialized TFExample protos. Image resolutions are expected to be the same
if more than 1 image is provided.
* `image_and_boxes_tensor`: Accepts a 4-D image tensor of size
[1, None, None, 3] and a boxes tensor of size [1, None, 4] of normalized
bounding boxes. To be able to support this option, the model needs
to implement a predict_masks_from_boxes method. See the documentation
for DetectionFromImageAndBoxModule for details.
and the following output nodes returned by the model.postprocess(..):
* `num_detections`: Outputs float32 tensors of the form [batch]
that specifies the number of valid boxes per image in the batch.
* `detection_boxes`: Outputs float32 tensors of the form
[batch, num_boxes, 4] containing detected boxes.
* `detection_scores`: Outputs float32 tensors of the form
[batch, num_boxes] containing class scores for the detections.
* `detection_classes`: Outputs float32 tensors of the form
[batch, num_boxes] containing classes for the detections.
Example Usage:
--------------
python exporter_main_v2.py \
--input_type image_tensor \
--pipeline_config_path path/to/ssd_inception_v2.config \
--trained_checkpoint_dir path/to/checkpoint \
    --output_directory path/to/exported_model_directory \
--use_side_inputs True/False \
--side_input_shapes dim_0,dim_1,...dim_a/.../dim_0,dim_1,...,dim_z \
--side_input_names name_a,name_b,...,name_c \
--side_input_types type_1,type_2
The expected output would be in the directory
path/to/exported_model_directory (which is created if it does not exist)
holding two subdirectories (corresponding to checkpoint and SavedModel,
respectively) and a copy of the pipeline config.
Config overrides (see the `config_override` flag) are text protobufs
(also of type pipeline_pb2.TrainEvalPipelineConfig) which are used to override
certain fields in the provided pipeline_config_path. These are useful for
making small changes to the inference graph that differ from the training or
eval config.
Example Usage (in which we change the second stage post-processing score
threshold to be 0.5):
python exporter_main_v2.py \
--input_type image_tensor \
--pipeline_config_path path/to/ssd_inception_v2.config \
--trained_checkpoint_dir path/to/checkpoint \
--output_directory path/to/exported_model_directory \
--config_override " \
model{ \
faster_rcnn { \
second_stage_post_processing { \
batch_non_max_suppression { \
score_threshold: 0.5 \
} \
} \
} \
}"
If side inputs are desired, the following arguments could be appended
(the example below is for Context R-CNN).
--use_side_inputs True \
--side_input_shapes 1,2000,2057/1 \
--side_input_names context_features,valid_context_size \
--side_input_types tf.float32,tf.int32
"""
from absl import app
from absl import flags
import tensorflow.compat.v2 as tf
from google.protobuf import text_format
from object_detection import exporter_lib_v2
from object_detection.protos import pipeline_pb2
tf.enable_v2_behavior()
FLAGS = flags.FLAGS
flags.DEFINE_string('input_type', 'image_tensor', 'Type of input node. Can be '
'one of [`image_tensor`, `encoded_image_string_tensor`, '
'`tf_example`, `float_image_tensor`, '
'`image_and_boxes_tensor`]')
flags.DEFINE_string('pipeline_config_path', None,
'Path to a pipeline_pb2.TrainEvalPipelineConfig config '
'file.')
flags.DEFINE_string('trained_checkpoint_dir', None,
'Path to trained checkpoint directory')
flags.DEFINE_string('output_directory', None, 'Path to write outputs.')
flags.DEFINE_string('config_override', '',
'pipeline_pb2.TrainEvalPipelineConfig '
'text proto to override pipeline_config_path.')
flags.DEFINE_boolean('use_side_inputs', False,
'If True, uses side inputs as well as image inputs.')
flags.DEFINE_string('side_input_shapes', '',
'If use_side_inputs is True, this explicitly sets '
'the shape of the side input tensors to a fixed size. The '
'dimensions are to be provided as a comma-separated list '
'of integers. A value of -1 can be used for unknown '
'dimensions. A `/` denotes a break, starting the shape of '
'the next side input tensor. This flag is required if '
'using side inputs.')
flags.DEFINE_string('side_input_types', '',
'If use_side_inputs is True, this explicitly sets '
'the type of the side input tensors. The '
'dimensions are to be provided as a comma-separated list '
'of types, each of `string`, `integer`, or `float`. '
'This flag is required if using side inputs.')
flags.DEFINE_string('side_input_names', '',
'If use_side_inputs is True, this explicitly sets '
'the names of the side input tensors required by the model '
'assuming the names will be a comma-separated list of '
'strings. This flag is required if using side inputs.')
flags.mark_flag_as_required('pipeline_config_path')
flags.mark_flag_as_required('trained_checkpoint_dir')
flags.mark_flag_as_required('output_directory')
def main(_):
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
with tf.io.gfile.GFile(FLAGS.pipeline_config_path, 'r') as f:
text_format.Merge(f.read(), pipeline_config)
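  # The override proto is merged last, so scalar fields set via
  # --config_override take precedence over values from the config file.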
text_format.Merge(FLAGS.config_override, pipeline_config)
exporter_lib_v2.export_inference_graph(
FLAGS.input_type, pipeline_config, FLAGS.trained_checkpoint_dir,
FLAGS.output_directory, FLAGS.use_side_inputs, FLAGS.side_input_shapes,
FLAGS.side_input_types, FLAGS.side_input_names)
if __name__ == '__main__':
app.run(main)
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/exporter_main_v2.py | exporter_main_v2.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for eval_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
from absl.testing import parameterized
import numpy as np
import six
from six.moves import range
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection import eval_util
from object_detection.core import standard_fields as fields
from object_detection.metrics import coco_evaluation
from object_detection.protos import eval_pb2
from object_detection.utils import test_case
from object_detection.utils import tf_version
class EvalUtilTest(test_case.TestCase, parameterized.TestCase):
def _get_categories_list(self):
return [{'id': 1, 'name': 'person'},
{'id': 2, 'name': 'dog'},
{'id': 3, 'name': 'cat'}]
def _get_categories_list_with_keypoints(self):
return [{
'id': 1,
'name': 'person',
'keypoints': {
'left_eye': 0,
'right_eye': 3
}
}, {
'id': 2,
'name': 'dog',
'keypoints': {
'tail_start': 1,
'mouth': 2
}
}, {
'id': 3,
'name': 'cat'
}]
def _make_evaluation_dict(self,
resized_groundtruth_masks=False,
batch_size=1,
max_gt_boxes=None,
scale_to_absolute=False):
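    """Builds a fake eval dict whose detections match the groundtruth."""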
input_data_fields = fields.InputDataFields
detection_fields = fields.DetectionResultFields
image = tf.zeros(shape=[batch_size, 20, 20, 3], dtype=tf.uint8)
if batch_size == 1:
key = tf.constant('image1')
else:
key = tf.constant([str(i) for i in range(batch_size)])
detection_boxes = tf.tile(tf.constant([[[0., 0., 1., 1.]]]),
multiples=[batch_size, 1, 1])
detection_scores = tf.tile(tf.constant([[0.8]]), multiples=[batch_size, 1])
detection_classes = tf.tile(tf.constant([[0]]), multiples=[batch_size, 1])
detection_masks = tf.tile(tf.ones(shape=[1, 1, 20, 20], dtype=tf.float32),
multiples=[batch_size, 1, 1, 1])
num_detections = tf.ones([batch_size])
groundtruth_boxes = tf.constant([[0., 0., 1., 1.]])
groundtruth_classes = tf.constant([1])
groundtruth_instance_masks = tf.ones(shape=[1, 20, 20], dtype=tf.uint8)
original_image_spatial_shapes = tf.constant([[20, 20]], dtype=tf.int32)
groundtruth_keypoints = tf.constant([[0.0, 0.0], [0.5, 0.5], [1.0, 1.0]])
if resized_groundtruth_masks:
groundtruth_instance_masks = tf.ones(shape=[1, 10, 10], dtype=tf.uint8)
if batch_size > 1:
groundtruth_boxes = tf.tile(tf.expand_dims(groundtruth_boxes, 0),
multiples=[batch_size, 1, 1])
groundtruth_classes = tf.tile(tf.expand_dims(groundtruth_classes, 0),
multiples=[batch_size, 1])
groundtruth_instance_masks = tf.tile(
tf.expand_dims(groundtruth_instance_masks, 0),
multiples=[batch_size, 1, 1, 1])
groundtruth_keypoints = tf.tile(
tf.expand_dims(groundtruth_keypoints, 0),
multiples=[batch_size, 1, 1])
original_image_spatial_shapes = tf.tile(original_image_spatial_shapes,
multiples=[batch_size, 1])
detections = {
detection_fields.detection_boxes: detection_boxes,
detection_fields.detection_scores: detection_scores,
detection_fields.detection_classes: detection_classes,
detection_fields.detection_masks: detection_masks,
detection_fields.num_detections: num_detections
}
groundtruth = {
input_data_fields.groundtruth_boxes: groundtruth_boxes,
input_data_fields.groundtruth_classes: groundtruth_classes,
input_data_fields.groundtruth_keypoints: groundtruth_keypoints,
input_data_fields.groundtruth_instance_masks:
groundtruth_instance_masks,
input_data_fields.original_image_spatial_shape:
original_image_spatial_shapes
}
if batch_size > 1:
return eval_util.result_dict_for_batched_example(
image, key, detections, groundtruth,
scale_to_absolute=scale_to_absolute,
max_gt_boxes=max_gt_boxes)
else:
return eval_util.result_dict_for_single_example(
image, key, detections, groundtruth,
scale_to_absolute=scale_to_absolute)
@parameterized.parameters(
{'batch_size': 1, 'max_gt_boxes': None, 'scale_to_absolute': True},
{'batch_size': 8, 'max_gt_boxes': [1], 'scale_to_absolute': True},
{'batch_size': 1, 'max_gt_boxes': None, 'scale_to_absolute': False},
{'batch_size': 8, 'max_gt_boxes': [1], 'scale_to_absolute': False}
)
@unittest.skipIf(tf_version.is_tf2(), 'Only compatible with TF1.X')
def test_get_eval_metric_ops_for_coco_detections(self, batch_size=1,
max_gt_boxes=None,
scale_to_absolute=False):
eval_config = eval_pb2.EvalConfig()
eval_config.metrics_set.extend(['coco_detection_metrics'])
categories = self._get_categories_list()
eval_dict = self._make_evaluation_dict(batch_size=batch_size,
max_gt_boxes=max_gt_boxes,
scale_to_absolute=scale_to_absolute)
metric_ops = eval_util.get_eval_metric_ops_for_evaluators(
eval_config, categories, eval_dict)
_, update_op = metric_ops['DetectionBoxes_Precision/mAP']
with self.test_session() as sess:
metrics = {}
for key, (value_op, _) in six.iteritems(metric_ops):
metrics[key] = value_op
sess.run(update_op)
metrics = sess.run(metrics)
self.assertAlmostEqual(1.0, metrics['DetectionBoxes_Precision/mAP'])
self.assertNotIn('DetectionMasks_Precision/mAP', metrics)
@parameterized.parameters(
{'batch_size': 1, 'max_gt_boxes': None, 'scale_to_absolute': True},
{'batch_size': 8, 'max_gt_boxes': [1], 'scale_to_absolute': True},
{'batch_size': 1, 'max_gt_boxes': None, 'scale_to_absolute': False},
{'batch_size': 8, 'max_gt_boxes': [1], 'scale_to_absolute': False}
)
@unittest.skipIf(tf_version.is_tf2(), 'Only compatible with TF1.X')
def test_get_eval_metric_ops_for_coco_detections_and_masks(
self, batch_size=1, max_gt_boxes=None, scale_to_absolute=False):
eval_config = eval_pb2.EvalConfig()
eval_config.metrics_set.extend(
['coco_detection_metrics', 'coco_mask_metrics'])
categories = self._get_categories_list()
eval_dict = self._make_evaluation_dict(batch_size=batch_size,
max_gt_boxes=max_gt_boxes,
scale_to_absolute=scale_to_absolute)
metric_ops = eval_util.get_eval_metric_ops_for_evaluators(
eval_config, categories, eval_dict)
_, update_op_boxes = metric_ops['DetectionBoxes_Precision/mAP']
_, update_op_masks = metric_ops['DetectionMasks_Precision/mAP']
with self.test_session() as sess:
metrics = {}
for key, (value_op, _) in six.iteritems(metric_ops):
metrics[key] = value_op
sess.run(update_op_boxes)
sess.run(update_op_masks)
metrics = sess.run(metrics)
self.assertAlmostEqual(1.0, metrics['DetectionBoxes_Precision/mAP'])
self.assertAlmostEqual(1.0, metrics['DetectionMasks_Precision/mAP'])
@parameterized.parameters(
{'batch_size': 1, 'max_gt_boxes': None, 'scale_to_absolute': True},
{'batch_size': 8, 'max_gt_boxes': [1], 'scale_to_absolute': True},
{'batch_size': 1, 'max_gt_boxes': None, 'scale_to_absolute': False},
{'batch_size': 8, 'max_gt_boxes': [1], 'scale_to_absolute': False}
)
@unittest.skipIf(tf_version.is_tf2(), 'Only compatible with TF1.X')
def test_get_eval_metric_ops_for_coco_detections_and_resized_masks(
self, batch_size=1, max_gt_boxes=None, scale_to_absolute=False):
eval_config = eval_pb2.EvalConfig()
eval_config.metrics_set.extend(
['coco_detection_metrics', 'coco_mask_metrics'])
categories = self._get_categories_list()
eval_dict = self._make_evaluation_dict(batch_size=batch_size,
max_gt_boxes=max_gt_boxes,
scale_to_absolute=scale_to_absolute,
resized_groundtruth_masks=True)
metric_ops = eval_util.get_eval_metric_ops_for_evaluators(
eval_config, categories, eval_dict)
_, update_op_boxes = metric_ops['DetectionBoxes_Precision/mAP']
_, update_op_masks = metric_ops['DetectionMasks_Precision/mAP']
with self.test_session() as sess:
metrics = {}
for key, (value_op, _) in six.iteritems(metric_ops):
metrics[key] = value_op
sess.run(update_op_boxes)
sess.run(update_op_masks)
metrics = sess.run(metrics)
self.assertAlmostEqual(1.0, metrics['DetectionBoxes_Precision/mAP'])
self.assertAlmostEqual(1.0, metrics['DetectionMasks_Precision/mAP'])
@unittest.skipIf(tf_version.is_tf2(), 'Only compatible with TF1.X')
def test_get_eval_metric_ops_raises_error_with_unsupported_metric(self):
eval_config = eval_pb2.EvalConfig()
eval_config.metrics_set.extend(['unsupported_metric'])
categories = self._get_categories_list()
eval_dict = self._make_evaluation_dict()
with self.assertRaises(ValueError):
eval_util.get_eval_metric_ops_for_evaluators(
eval_config, categories, eval_dict)
def test_get_eval_metric_ops_for_evaluators(self):
eval_config = eval_pb2.EvalConfig()
eval_config.metrics_set.extend([
'coco_detection_metrics', 'coco_mask_metrics',
'precision_at_recall_detection_metrics'
])
eval_config.include_metrics_per_category = True
eval_config.recall_lower_bound = 0.2
eval_config.recall_upper_bound = 0.6
evaluator_options = eval_util.evaluator_options_from_eval_config(
eval_config)
self.assertTrue(evaluator_options['coco_detection_metrics']
['include_metrics_per_category'])
self.assertFalse(evaluator_options['coco_detection_metrics']
['skip_predictions_for_unlabeled_class'])
self.assertTrue(
evaluator_options['coco_mask_metrics']['include_metrics_per_category'])
self.assertAlmostEqual(
evaluator_options['precision_at_recall_detection_metrics']
['recall_lower_bound'], eval_config.recall_lower_bound)
self.assertAlmostEqual(
evaluator_options['precision_at_recall_detection_metrics']
['recall_upper_bound'], eval_config.recall_upper_bound)
def test_get_evaluator_with_evaluator_options(self):
eval_config = eval_pb2.EvalConfig()
eval_config.metrics_set.extend(
['coco_detection_metrics', 'precision_at_recall_detection_metrics'])
eval_config.include_metrics_per_category = True
eval_config.skip_predictions_for_unlabeled_class = True
eval_config.recall_lower_bound = 0.2
eval_config.recall_upper_bound = 0.6
categories = self._get_categories_list()
evaluator_options = eval_util.evaluator_options_from_eval_config(
eval_config)
evaluator = eval_util.get_evaluators(eval_config, categories,
evaluator_options)
self.assertTrue(evaluator[0]._include_metrics_per_category)
self.assertTrue(evaluator[0]._skip_predictions_for_unlabeled_class)
self.assertAlmostEqual(evaluator[1]._recall_lower_bound,
eval_config.recall_lower_bound)
self.assertAlmostEqual(evaluator[1]._recall_upper_bound,
eval_config.recall_upper_bound)
def test_get_evaluator_with_no_evaluator_options(self):
eval_config = eval_pb2.EvalConfig()
eval_config.metrics_set.extend(
['coco_detection_metrics', 'precision_at_recall_detection_metrics'])
eval_config.include_metrics_per_category = True
eval_config.recall_lower_bound = 0.2
eval_config.recall_upper_bound = 0.6
categories = self._get_categories_list()
evaluator = eval_util.get_evaluators(
eval_config, categories, evaluator_options=None)
# Even though we are setting eval_config.include_metrics_per_category = True
# and bounds on recall, these options are never passed into the
# DetectionEvaluator constructor (via `evaluator_options`).
self.assertFalse(evaluator[0]._include_metrics_per_category)
self.assertAlmostEqual(evaluator[1]._recall_lower_bound, 0.0)
self.assertAlmostEqual(evaluator[1]._recall_upper_bound, 1.0)
def test_get_evaluator_with_keypoint_metrics(self):
eval_config = eval_pb2.EvalConfig()
person_keypoints_metric = eval_config.parameterized_metric.add()
person_keypoints_metric.coco_keypoint_metrics.class_label = 'person'
person_keypoints_metric.coco_keypoint_metrics.keypoint_label_to_sigmas[
'left_eye'] = 0.1
person_keypoints_metric.coco_keypoint_metrics.keypoint_label_to_sigmas[
'right_eye'] = 0.2
dog_keypoints_metric = eval_config.parameterized_metric.add()
dog_keypoints_metric.coco_keypoint_metrics.class_label = 'dog'
dog_keypoints_metric.coco_keypoint_metrics.keypoint_label_to_sigmas[
'tail_start'] = 0.3
dog_keypoints_metric.coco_keypoint_metrics.keypoint_label_to_sigmas[
'mouth'] = 0.4
categories = self._get_categories_list_with_keypoints()
evaluator = eval_util.get_evaluators(
eval_config, categories, evaluator_options=None)
# Verify keypoint evaluator class variables.
self.assertLen(evaluator, 3)
self.assertFalse(evaluator[0]._include_metrics_per_category)
self.assertEqual(evaluator[1]._category_name, 'person')
self.assertEqual(evaluator[2]._category_name, 'dog')
self.assertAllEqual(evaluator[1]._keypoint_ids, [0, 3])
self.assertAllEqual(evaluator[2]._keypoint_ids, [1, 2])
self.assertAllClose([0.1, 0.2], evaluator[1]._oks_sigmas)
self.assertAllClose([0.3, 0.4], evaluator[2]._oks_sigmas)
def test_get_evaluator_with_unmatched_label(self):
eval_config = eval_pb2.EvalConfig()
person_keypoints_metric = eval_config.parameterized_metric.add()
person_keypoints_metric.coco_keypoint_metrics.class_label = 'unmatched'
person_keypoints_metric.coco_keypoint_metrics.keypoint_label_to_sigmas[
'kpt'] = 0.1
categories = self._get_categories_list_with_keypoints()
evaluator = eval_util.get_evaluators(
eval_config, categories, evaluator_options=None)
self.assertLen(evaluator, 1)
self.assertNotIsInstance(
evaluator[0], coco_evaluation.CocoKeypointEvaluator)
def test_padded_image_result_dict(self):
input_data_fields = fields.InputDataFields
detection_fields = fields.DetectionResultFields
key = tf.constant([str(i) for i in range(2)])
detection_boxes = np.array([[[0., 0., 1., 1.]], [[0.0, 0.0, 0.5, 0.5]]],
dtype=np.float32)
detection_keypoints = np.array([[0.0, 0.0], [0.5, 0.5], [1.0, 1.0]],
dtype=np.float32)
def graph_fn():
detections = {
detection_fields.detection_boxes:
tf.constant(detection_boxes),
detection_fields.detection_scores:
tf.constant([[1.], [1.]]),
detection_fields.detection_classes:
tf.constant([[1], [2]]),
detection_fields.num_detections:
tf.constant([1, 1]),
detection_fields.detection_keypoints:
tf.tile(
tf.reshape(
tf.constant(detection_keypoints), shape=[1, 1, 3, 2]),
multiples=[2, 1, 1, 1])
}
gt_boxes = detection_boxes
groundtruth = {
input_data_fields.groundtruth_boxes:
tf.constant(gt_boxes),
input_data_fields.groundtruth_classes:
tf.constant([[1.], [1.]]),
input_data_fields.groundtruth_keypoints:
tf.tile(
tf.reshape(
tf.constant(detection_keypoints), shape=[1, 1, 3, 2]),
multiples=[2, 1, 1, 1])
}
image = tf.zeros((2, 100, 100, 3), dtype=tf.float32)
true_image_shapes = tf.constant([[100, 100, 3], [50, 100, 3]])
original_image_spatial_shapes = tf.constant([[200, 200], [150, 300]])
result = eval_util.result_dict_for_batched_example(
image, key, detections, groundtruth,
scale_to_absolute=True,
true_image_shapes=true_image_shapes,
original_image_spatial_shapes=original_image_spatial_shapes,
max_gt_boxes=tf.constant(1))
return (result[input_data_fields.groundtruth_boxes],
result[input_data_fields.groundtruth_keypoints],
result[detection_fields.detection_boxes],
result[detection_fields.detection_keypoints])
(gt_boxes, gt_keypoints, detection_boxes,
detection_keypoints) = self.execute_cpu(graph_fn, [])
self.assertAllEqual(
[[[0., 0., 200., 200.]], [[0.0, 0.0, 150., 150.]]],
gt_boxes)
self.assertAllClose([[[[0., 0.], [100., 100.], [200., 200.]]],
[[[0., 0.], [150., 150.], [300., 300.]]]],
gt_keypoints)
# Predictions from the model are not scaled.
self.assertAllEqual(
[[[0., 0., 200., 200.]], [[0.0, 0.0, 75., 150.]]],
detection_boxes)
self.assertAllClose([[[[0., 0.], [100., 100.], [200., 200.]]],
[[[0., 0.], [75., 150.], [150., 300.]]]],
detection_keypoints)
def test_evaluator_options_from_eval_config_no_super_categories(self):
eval_config_text_proto = """
metrics_set: "coco_detection_metrics"
metrics_set: "coco_mask_metrics"
include_metrics_per_category: true
use_moving_averages: false
batch_size: 1;
"""
eval_config = eval_pb2.EvalConfig()
text_format.Merge(eval_config_text_proto, eval_config)
evaluator_options = eval_util.evaluator_options_from_eval_config(
eval_config)
self.assertNotIn('super_categories', evaluator_options['coco_mask_metrics'])
def test_evaluator_options_from_eval_config_with_super_categories(self):
eval_config_text_proto = """
metrics_set: "coco_detection_metrics"
metrics_set: "coco_mask_metrics"
include_metrics_per_category: true
use_moving_averages: false
batch_size: 1;
super_categories {
key: "supercat1"
value: "a,b,c"
}
super_categories {
key: "supercat2"
value: "d,e,f"
}
"""
eval_config = eval_pb2.EvalConfig()
text_format.Merge(eval_config_text_proto, eval_config)
evaluator_options = eval_util.evaluator_options_from_eval_config(
eval_config)
self.assertIn('super_categories', evaluator_options['coco_mask_metrics'])
super_categories = evaluator_options[
'coco_mask_metrics']['super_categories']
self.assertIn('supercat1', super_categories)
self.assertIn('supercat2', super_categories)
self.assertAllEqual(super_categories['supercat1'], ['a', 'b', 'c'])
self.assertAllEqual(super_categories['supercat2'], ['d', 'e', 'f'])
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/eval_util_test.py | eval_util_test.py |
# Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Exports an SSD detection model to use with tf-lite.
See export_tflite_ssd_graph.py for usage.
"""
import os
import tempfile
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.core.protobuf import saver_pb2
from object_detection import exporter
from object_detection.builders import graph_rewriter_builder
from object_detection.builders import model_builder
from object_detection.builders import post_processing_builder
from object_detection.core import box_list
from object_detection.utils import tf_version
_DEFAULT_NUM_CHANNELS = 3
_DEFAULT_NUM_COORD_BOX = 4
if tf_version.is_tf1():
from tensorflow.tools.graph_transforms import TransformGraph # pylint: disable=g-import-not-at-top
def get_const_center_size_encoded_anchors(anchors):
"""Exports center-size encoded anchors as a constant tensor.
Args:
anchors: a float32 tensor of shape [num_anchors, 4] containing the anchor
boxes
Returns:
encoded_anchors: a float32 constant tensor of shape [num_anchors, 4]
containing the anchor boxes.
"""
anchor_boxlist = box_list.BoxList(anchors)
y, x, h, w = anchor_boxlist.get_center_coordinates_and_sizes()
num_anchors = y.get_shape().as_list()
with tf.Session() as sess:
y_out, x_out, h_out, w_out = sess.run([y, x, h, w])
encoded_anchors = tf.constant(
np.transpose(np.stack((y_out, x_out, h_out, w_out))),
dtype=tf.float32,
shape=[num_anchors[0], _DEFAULT_NUM_COORD_BOX],
name='anchors')
return encoded_anchors
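# Illustrative sketch (added for exposition; not part of the original module).
# It shows the [ymin, xmin, ymax, xmax] -> [ycenter, xcenter, height, width]
# conversion performed by get_const_center_size_encoded_anchors() above for a
# single hypothetical anchor; like the helper itself, it assumes TF1-style
# graph execution.
def _example_center_size_encoded_anchor():  # pragma: no cover
  anchors = tf.constant([[0.1, 0.2, 0.5, 0.6]], dtype=tf.float32)
  encoded = get_const_center_size_encoded_anchors(anchors)
  with tf.Session() as sess:
    # Expected output: [[0.3, 0.4, 0.4, 0.4]].
    return sess.run(encoded)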
def append_postprocessing_op(frozen_graph_def,
max_detections,
max_classes_per_detection,
nms_score_threshold,
nms_iou_threshold,
num_classes,
scale_values,
detections_per_class=100,
use_regular_nms=False,
additional_output_tensors=()):
"""Appends postprocessing custom op.
Args:
frozen_graph_def: Frozen GraphDef for SSD model after freezing the
checkpoint
max_detections: Maximum number of detections (boxes) to show
max_classes_per_detection: Number of classes to display per detection
nms_score_threshold: Score threshold used in Non-maximal suppression in
post-processing
nms_iou_threshold: Intersection-over-union threshold used in Non-maximal
suppression in post-processing
num_classes: number of classes in SSD detector
    scale_values: a dict with the key-value pairs {y_scale: 10, x_scale: 10,
      h_scale: 5, w_scale: 5} that are used to decode the center-size encoded
      boxes
detections_per_class: In regular NonMaxSuppression, number of anchors used
for NonMaxSuppression per class
use_regular_nms: Flag to set postprocessing op to use Regular NMS instead of
Fast NMS.
additional_output_tensors: Array of additional tensor names to output.
Tensors are appended after postprocessing output.
Returns:
transformed_graph_def: Frozen GraphDef with postprocessing custom op
appended
TFLite_Detection_PostProcess custom op node has four outputs:
detection_boxes: a float32 tensor of shape [1, num_boxes, 4] with box
locations
detection_classes: a float32 tensor of shape [1, num_boxes]
with class indices
detection_scores: a float32 tensor of shape [1, num_boxes]
with class scores
num_boxes: a float32 tensor of size 1 containing the number of detected
boxes
"""
new_output = frozen_graph_def.node.add()
new_output.op = 'TFLite_Detection_PostProcess'
new_output.name = 'TFLite_Detection_PostProcess'
new_output.attr['_output_quantized'].CopyFrom(
attr_value_pb2.AttrValue(b=True))
new_output.attr['_output_types'].list.type.extend([
types_pb2.DT_FLOAT, types_pb2.DT_FLOAT, types_pb2.DT_FLOAT,
types_pb2.DT_FLOAT
])
new_output.attr['_support_output_type_float_in_quantized_op'].CopyFrom(
attr_value_pb2.AttrValue(b=True))
new_output.attr['max_detections'].CopyFrom(
attr_value_pb2.AttrValue(i=max_detections))
new_output.attr['max_classes_per_detection'].CopyFrom(
attr_value_pb2.AttrValue(i=max_classes_per_detection))
new_output.attr['nms_score_threshold'].CopyFrom(
attr_value_pb2.AttrValue(f=nms_score_threshold.pop()))
new_output.attr['nms_iou_threshold'].CopyFrom(
attr_value_pb2.AttrValue(f=nms_iou_threshold.pop()))
new_output.attr['num_classes'].CopyFrom(
attr_value_pb2.AttrValue(i=num_classes))
new_output.attr['y_scale'].CopyFrom(
attr_value_pb2.AttrValue(f=scale_values['y_scale'].pop()))
new_output.attr['x_scale'].CopyFrom(
attr_value_pb2.AttrValue(f=scale_values['x_scale'].pop()))
new_output.attr['h_scale'].CopyFrom(
attr_value_pb2.AttrValue(f=scale_values['h_scale'].pop()))
new_output.attr['w_scale'].CopyFrom(
attr_value_pb2.AttrValue(f=scale_values['w_scale'].pop()))
new_output.attr['detections_per_class'].CopyFrom(
attr_value_pb2.AttrValue(i=detections_per_class))
new_output.attr['use_regular_nms'].CopyFrom(
attr_value_pb2.AttrValue(b=use_regular_nms))
new_output.input.extend(
['raw_outputs/box_encodings', 'raw_outputs/class_predictions', 'anchors'])
# Transform the graph to append new postprocessing op
input_names = []
output_names = ['TFLite_Detection_PostProcess'
] + list(additional_output_tensors)
transforms = ['strip_unused_nodes']
transformed_graph_def = TransformGraph(frozen_graph_def, input_names,
output_names, transforms)
return transformed_graph_def
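# Illustrative sketch (added for exposition; the values are hypothetical and
# not part of the original module). append_postprocessing_op() above expects
# the NMS thresholds and scale values wrapped in single-element sets -- the
# .pop() calls consume them -- matching how export_tflite_graph() below builds
# these arguments from the pipeline config.
_EXAMPLE_POSTPROCESSING_KWARGS = {
    'max_detections': 10,
    'max_classes_per_detection': 1,
    'nms_score_threshold': {1e-8},
    'nms_iou_threshold': {0.6},
    'num_classes': 90,
    'scale_values': {
        'y_scale': {10.0},
        'x_scale': {10.0},
        'h_scale': {5.0},
        'w_scale': {5.0}
    },
}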
def export_tflite_graph(pipeline_config,
trained_checkpoint_prefix,
output_dir,
add_postprocessing_op,
max_detections,
max_classes_per_detection,
detections_per_class=100,
use_regular_nms=False,
binary_graph_name='tflite_graph.pb',
txt_graph_name='tflite_graph.pbtxt',
additional_output_tensors=()):
"""Exports a tflite compatible graph and anchors for ssd detection model.
  Anchors are written to a constant tensor in the graph, and the
  tflite-compatible graph is written to output_dir/tflite_graph.pb.
Args:
pipeline_config: a pipeline.proto object containing the configuration for
SSD model to export.
trained_checkpoint_prefix: a file prefix for the checkpoint containing the
trained parameters of the SSD model.
output_dir: A directory to write the tflite graph and anchor file to.
    add_postprocessing_op: If true, a TFLite_Detection_PostProcess custom op is
      appended to the frozen graph.
max_detections: Maximum number of detections (boxes) to show
max_classes_per_detection: Number of classes to display per detection
detections_per_class: In regular NonMaxSuppression, number of anchors used
for NonMaxSuppression per class
use_regular_nms: Flag to set postprocessing op to use Regular NMS instead of
Fast NMS.
binary_graph_name: Name of the exported graph file in binary format.
txt_graph_name: Name of the exported graph file in text format.
additional_output_tensors: Array of additional tensor names to output.
Additional tensors are appended to the end of output tensor list.
Raises:
    ValueError: if the pipeline config contains models other than ssd or uses
      an image resizer other than fixed_shape_resizer.
"""
tf.gfile.MakeDirs(output_dir)
if pipeline_config.model.WhichOneof('model') != 'ssd':
raise ValueError('Only ssd models are supported in tflite. '
'Found {} in config'.format(
pipeline_config.model.WhichOneof('model')))
num_classes = pipeline_config.model.ssd.num_classes
nms_score_threshold = {
pipeline_config.model.ssd.post_processing.batch_non_max_suppression
.score_threshold
}
nms_iou_threshold = {
pipeline_config.model.ssd.post_processing.batch_non_max_suppression
.iou_threshold
}
scale_values = {}
scale_values['y_scale'] = {
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.y_scale
}
scale_values['x_scale'] = {
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.x_scale
}
scale_values['h_scale'] = {
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.height_scale
}
scale_values['w_scale'] = {
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.width_scale
}
image_resizer_config = pipeline_config.model.ssd.image_resizer
image_resizer = image_resizer_config.WhichOneof('image_resizer_oneof')
num_channels = _DEFAULT_NUM_CHANNELS
if image_resizer == 'fixed_shape_resizer':
height = image_resizer_config.fixed_shape_resizer.height
width = image_resizer_config.fixed_shape_resizer.width
if image_resizer_config.fixed_shape_resizer.convert_to_grayscale:
num_channels = 1
shape = [1, height, width, num_channels]
else:
raise ValueError(
        'Only fixed_shape_resizer '
        'is supported with tflite. Found {}'.format(
image_resizer_config.WhichOneof('image_resizer_oneof')))
image = tf.placeholder(
tf.float32, shape=shape, name='normalized_input_image_tensor')
detection_model = model_builder.build(
pipeline_config.model, is_training=False)
predicted_tensors = detection_model.predict(image, true_image_shapes=None)
# The score conversion occurs before the post-processing custom op
_, score_conversion_fn = post_processing_builder.build(
pipeline_config.model.ssd.post_processing)
class_predictions = score_conversion_fn(
predicted_tensors['class_predictions_with_background'])
with tf.name_scope('raw_outputs'):
# 'raw_outputs/box_encodings': a float32 tensor of shape [1, num_anchors, 4]
# containing the encoded box predictions. Note that these are raw
    # predictions: no non-max suppression and no center-size box decoding
    # have been applied to them.
tf.identity(predicted_tensors['box_encodings'], name='box_encodings')
# 'raw_outputs/class_predictions': a float32 tensor of shape
# [1, num_anchors, num_classes] containing the class scores for each anchor
# after applying score conversion.
tf.identity(class_predictions, name='class_predictions')
    # 'anchors': a float32 tensor of shape
    # [num_anchors, 4] containing the center-size encoded anchors as a
    # constant node.
tf.identity(
get_const_center_size_encoded_anchors(predicted_tensors['anchors']),
name='anchors')
# Add global step to the graph, so we know the training step number when we
# evaluate the model.
tf.train.get_or_create_global_step()
# graph rewriter
is_quantized = pipeline_config.HasField('graph_rewriter')
if is_quantized:
graph_rewriter_config = pipeline_config.graph_rewriter
graph_rewriter_fn = graph_rewriter_builder.build(
graph_rewriter_config, is_training=False)
graph_rewriter_fn()
if pipeline_config.model.ssd.feature_extractor.HasField('fpn'):
exporter.rewrite_nn_resize_op(is_quantized)
# freeze the graph
saver_kwargs = {}
if pipeline_config.eval_config.use_moving_averages:
saver_kwargs['write_version'] = saver_pb2.SaverDef.V1
moving_average_checkpoint = tempfile.NamedTemporaryFile()
exporter.replace_variable_values_with_moving_averages(
tf.get_default_graph(), trained_checkpoint_prefix,
moving_average_checkpoint.name)
checkpoint_to_use = moving_average_checkpoint.name
else:
checkpoint_to_use = trained_checkpoint_prefix
saver = tf.train.Saver(**saver_kwargs)
input_saver_def = saver.as_saver_def()
frozen_graph_def = exporter.freeze_graph_with_def_protos(
input_graph_def=tf.get_default_graph().as_graph_def(),
input_saver_def=input_saver_def,
input_checkpoint=checkpoint_to_use,
output_node_names=','.join([
'raw_outputs/box_encodings', 'raw_outputs/class_predictions',
'anchors'
] + list(additional_output_tensors)),
restore_op_name='save/restore_all',
filename_tensor_name='save/Const:0',
clear_devices=True,
output_graph='',
initializer_nodes='')
# Add new operation to do post processing in a custom op (TF Lite only)
if add_postprocessing_op:
transformed_graph_def = append_postprocessing_op(
frozen_graph_def,
max_detections,
max_classes_per_detection,
nms_score_threshold,
nms_iou_threshold,
num_classes,
scale_values,
detections_per_class,
use_regular_nms,
additional_output_tensors=additional_output_tensors)
else:
# Return frozen without adding post-processing custom op
transformed_graph_def = frozen_graph_def
binary_graph = os.path.join(output_dir, binary_graph_name)
with tf.gfile.GFile(binary_graph, 'wb') as f:
f.write(transformed_graph_def.SerializeToString())
txt_graph = os.path.join(output_dir, txt_graph_name)
with tf.gfile.GFile(txt_graph, 'w') as f:
f.write(str(transformed_graph_def))
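# Illustrative usage sketch (added for exposition; the config path, checkpoint
# prefix and output directory are hypothetical placeholders, not part of the
# original module). See export_tflite_ssd_graph.py for the real command-line
# entry point.
def _example_export_tflite_graph():  # pragma: no cover
  # Local imports keep this illustrative helper self-contained.
  from google.protobuf import text_format
  from object_detection.protos import pipeline_pb2
  pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
  with tf.gfile.GFile('/tmp/pipeline.config', 'r') as f:
    text_format.Merge(f.read(), pipeline_config)
  export_tflite_graph(
      pipeline_config=pipeline_config,
      trained_checkpoint_prefix='/tmp/model.ckpt',
      output_dir='/tmp/tflite_export',
      add_postprocessing_op=True,
      max_detections=10,
      max_classes_per_detection=1)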
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/export_tflite_ssd_graph_lib.py | export_tflite_ssd_graph_lib.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model input function for tf-learn object detection model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tensorflow.compat.v1 as tf
from object_detection.builders import dataset_builder
from object_detection.builders import image_resizer_builder
from object_detection.builders import model_builder
from object_detection.builders import preprocessor_builder
from object_detection.core import box_list
from object_detection.core import box_list_ops
from object_detection.core import densepose_ops
from object_detection.core import keypoint_ops
from object_detection.core import preprocessor
from object_detection.core import standard_fields as fields
from object_detection.data_decoders import tf_example_decoder
from object_detection.protos import eval_pb2
from object_detection.protos import image_resizer_pb2
from object_detection.protos import input_reader_pb2
from object_detection.protos import model_pb2
from object_detection.protos import train_pb2
from object_detection.utils import config_util
from object_detection.utils import ops as util_ops
from object_detection.utils import shape_utils
HASH_KEY = 'hash'
HASH_BINS = 1 << 31
SERVING_FED_EXAMPLE_KEY = 'serialized_example'
_LABEL_OFFSET = 1
# A map of names to methods that help build the input pipeline.
INPUT_BUILDER_UTIL_MAP = {
'dataset_build': dataset_builder.build,
'model_build': model_builder.build,
}
def _multiclass_scores_or_one_hot_labels(multiclass_scores,
groundtruth_boxes,
groundtruth_classes, num_classes):
"""Returns one-hot encoding of classes when multiclass_scores is empty."""
  # Replace the groundtruth_classes tensor with the multiclass_scores tensor
  # when it's non-empty. If multiclass_scores is empty, fall back on the
  # groundtruth_classes tensor.
def true_fn():
return tf.reshape(multiclass_scores,
[tf.shape(groundtruth_boxes)[0], num_classes])
def false_fn():
return tf.one_hot(groundtruth_classes, num_classes)
return tf.cond(tf.size(multiclass_scores) > 0, true_fn, false_fn)
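# Illustrative sketch (added for exposition; not part of the original module):
# with an empty multiclass_scores tensor, the helper above falls back to
# tf.one_hot(groundtruth_classes, num_classes).
def _example_multiclass_scores_fallback():  # pragma: no cover
  groundtruth_boxes = tf.constant([[0., 0., 1., 1.]])
  groundtruth_classes = tf.constant([2], dtype=tf.int32)
  multiclass_scores = tf.constant([], dtype=tf.float32)  # empty -> fallback
  # Evaluates to [[0., 0., 1.]] in TF1 graph mode.
  return _multiclass_scores_or_one_hot_labels(
      multiclass_scores, groundtruth_boxes, groundtruth_classes, num_classes=3)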
def convert_labeled_classes_to_k_hot(groundtruth_labeled_classes,
num_classes,
map_empty_to_ones=False):
"""Returns k-hot encoding of the labeled classes.
If map_empty_to_ones is enabled and the input labeled_classes is empty,
this function assumes all classes are exhaustively labeled, thus returning
an all-one encoding.
Args:
groundtruth_labeled_classes: a Tensor holding a sparse representation of
labeled classes.
num_classes: an integer representing the number of classes
map_empty_to_ones: boolean (default: False). Set this to be True to default
to an all-ones result if given an empty `groundtruth_labeled_classes`.
Returns:
A k-hot (and 0-indexed) tensor representation of
`groundtruth_labeled_classes`.
"""
  # When map_empty_to_ones is True and the input labeled_classes is empty, all
  # classes are assumed to be exhaustively labeled, i.e. an all-ones encoding
  # is returned.
def true_fn():
return tf.sparse_to_dense(
groundtruth_labeled_classes - _LABEL_OFFSET, [num_classes],
tf.constant(1, dtype=tf.float32),
validate_indices=False)
def false_fn():
return tf.ones(num_classes, dtype=tf.float32)
if map_empty_to_ones:
return tf.cond(tf.size(groundtruth_labeled_classes) > 0, true_fn, false_fn)
return true_fn()
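# Illustrative sketch (added for exposition; not part of the original module):
# labeled classes follow the 1-indexed label-map convention, so labels [1, 3]
# with num_classes=3 yield the 0-indexed k-hot vector [1., 0., 1.].
def _example_convert_labeled_classes_to_k_hot():  # pragma: no cover
  labeled_classes = tf.constant([1, 3], dtype=tf.int64)
  # Evaluates to [1., 0., 1.] in TF1 graph mode.
  return convert_labeled_classes_to_k_hot(labeled_classes, num_classes=3)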
def _remove_unrecognized_classes(class_ids, unrecognized_label):
"""Returns class ids with unrecognized classes filtered out."""
recognized_indices = tf.squeeze(
tf.where(tf.greater(class_ids, unrecognized_label)), -1)
return tf.gather(class_ids, recognized_indices)
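# Illustrative sketch (added for exposition; not part of the original module):
# with the unrecognized label of -1 used elsewhere in this file, class ids
# [-1, 2, 5] are filtered down to [2, 5].
def _example_remove_unrecognized_classes():  # pragma: no cover
  class_ids = tf.constant([-1, 2, 5], dtype=tf.int64)
  return _remove_unrecognized_classes(class_ids, unrecognized_label=-1)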
def assert_or_prune_invalid_boxes(boxes):
"""Makes sure boxes have valid sizes (ymax >= ymin, xmax >= xmin).
When the hardware supports assertions, the function raises an error when
boxes have an invalid size. If assertions are not supported (e.g. on TPU),
boxes with invalid sizes are filtered out.
Args:
boxes: float tensor of shape [num_boxes, 4]
Returns:
boxes: float tensor of shape [num_valid_boxes, 4] with invalid boxes
filtered out.
Raises:
tf.errors.InvalidArgumentError: When we detect boxes with invalid size.
This is not supported on TPUs.
"""
ymin, xmin, ymax, xmax = tf.split(
boxes, num_or_size_splits=4, axis=1)
height_check = tf.Assert(tf.reduce_all(ymax >= ymin), [ymin, ymax])
width_check = tf.Assert(tf.reduce_all(xmax >= xmin), [xmin, xmax])
with tf.control_dependencies([height_check, width_check]):
boxes_tensor = tf.concat([ymin, xmin, ymax, xmax], axis=1)
boxlist = box_list.BoxList(boxes_tensor)
# TODO(b/149221748) Remove pruning when XLA supports assertions.
boxlist = box_list_ops.prune_small_boxes(boxlist, 0)
return boxlist.get()
def transform_input_data(tensor_dict,
model_preprocess_fn,
image_resizer_fn,
num_classes,
data_augmentation_fn=None,
merge_multiple_boxes=False,
retain_original_image=False,
use_multiclass_scores=False,
use_bfloat16=False,
retain_original_image_additional_channels=False,
keypoint_type_weight=None):
"""A single function that is responsible for all input data transformations.
Data transformation functions are applied in the following order.
1. If key fields.InputDataFields.image_additional_channels is present in
tensor_dict, the additional channels will be merged into
fields.InputDataFields.image.
2. data_augmentation_fn (optional): applied on tensor_dict.
3. model_preprocess_fn: applied only on image tensor in tensor_dict.
4. keypoint_type_weight (optional): If groundtruth keypoints are in
the tensor dictionary, per-keypoint weights are produced. These weights are
initialized by `keypoint_type_weight` (or ones if left None).
Then, for all keypoints that are not visible, the weights are set to 0 (to
avoid penalizing the model in a loss function).
5. image_resizer_fn: applied on original image and instance mask tensor in
tensor_dict.
6. one_hot_encoding: applied to classes tensor in tensor_dict.
7. merge_multiple_boxes (optional): when groundtruth boxes are exactly the
same they can be merged into a single box with an associated k-hot class
label.
Args:
tensor_dict: dictionary containing input tensors keyed by
fields.InputDataFields.
model_preprocess_fn: model's preprocess function to apply on image tensor.
This function must take in a 4-D float tensor and return a 4-D preprocess
float tensor and a tensor containing the true image shape.
image_resizer_fn: image resizer function to apply on groundtruth instance
      masks. This function must take a 3-D float tensor of an image and a 3-D
tensor of instance masks and return a resized version of these along with
the true shapes.
num_classes: number of max classes to one-hot (or k-hot) encode the class
labels.
data_augmentation_fn: (optional) data augmentation function to apply on
input `tensor_dict`.
merge_multiple_boxes: (optional) whether to merge multiple groundtruth boxes
and classes for a given image if the boxes are exactly the same.
retain_original_image: (optional) whether to retain original image in the
output dictionary.
use_multiclass_scores: whether to use multiclass scores as class targets
instead of one-hot encoding of `groundtruth_classes`. When
this is True and multiclass_scores is empty, one-hot encoding of
`groundtruth_classes` is used as a fallback.
use_bfloat16: (optional) a bool, whether to use bfloat16 in training.
retain_original_image_additional_channels: (optional) Whether to retain
original image additional channels in the output dictionary.
keypoint_type_weight: A list (of length num_keypoints) containing
groundtruth loss weights to use for each keypoint. If None, will use a
weight of 1.
Returns:
A dictionary keyed by fields.InputDataFields containing the tensors obtained
after applying all the transformations.
Raises:
KeyError: If both groundtruth_labeled_classes and groundtruth_image_classes
are provided by the decoder in tensor_dict since both fields are
considered to contain the same information.
"""
out_tensor_dict = tensor_dict.copy()
input_fields = fields.InputDataFields
labeled_classes_field = input_fields.groundtruth_labeled_classes
image_classes_field = input_fields.groundtruth_image_classes
verified_neg_classes_field = input_fields.groundtruth_verified_neg_classes
not_exhaustive_field = input_fields.groundtruth_not_exhaustive_classes
if (labeled_classes_field in out_tensor_dict and
image_classes_field in out_tensor_dict):
    raise KeyError('groundtruth_labeled_classes and groundtruth_image_classes '
                   'are provided by the decoder, but only one should be set.')
for field, map_empty_to_ones in [
(labeled_classes_field, True),
(image_classes_field, True),
(verified_neg_classes_field, False),
(not_exhaustive_field, False)]:
if field in out_tensor_dict:
out_tensor_dict[field] = _remove_unrecognized_classes(
out_tensor_dict[field], unrecognized_label=-1)
out_tensor_dict[field] = convert_labeled_classes_to_k_hot(
out_tensor_dict[field], num_classes, map_empty_to_ones)
if input_fields.multiclass_scores in out_tensor_dict:
out_tensor_dict[
input_fields
.multiclass_scores] = _multiclass_scores_or_one_hot_labels(
out_tensor_dict[input_fields.multiclass_scores],
out_tensor_dict[input_fields.groundtruth_boxes],
out_tensor_dict[input_fields.groundtruth_classes],
num_classes)
if input_fields.groundtruth_boxes in out_tensor_dict:
out_tensor_dict = util_ops.filter_groundtruth_with_nan_box_coordinates(
out_tensor_dict)
out_tensor_dict = util_ops.filter_unrecognized_classes(out_tensor_dict)
if retain_original_image:
out_tensor_dict[input_fields.original_image] = tf.cast(
image_resizer_fn(out_tensor_dict[input_fields.image],
None)[0], tf.uint8)
if input_fields.image_additional_channels in out_tensor_dict:
channels = out_tensor_dict[input_fields.image_additional_channels]
out_tensor_dict[input_fields.image] = tf.concat(
[out_tensor_dict[input_fields.image], channels], axis=2)
if retain_original_image_additional_channels:
out_tensor_dict[
input_fields.image_additional_channels] = tf.cast(
image_resizer_fn(channels, None)[0], tf.uint8)
# Apply data augmentation ops.
if data_augmentation_fn is not None:
out_tensor_dict = data_augmentation_fn(out_tensor_dict)
# Apply model preprocessing ops and resize instance masks.
image = out_tensor_dict[input_fields.image]
preprocessed_resized_image, true_image_shape = model_preprocess_fn(
tf.expand_dims(tf.cast(image, dtype=tf.float32), axis=0))
preprocessed_shape = tf.shape(preprocessed_resized_image)
new_height, new_width = preprocessed_shape[1], preprocessed_shape[2]
im_box = tf.stack([
0.0, 0.0,
tf.to_float(new_height) / tf.to_float(true_image_shape[0, 0]),
tf.to_float(new_width) / tf.to_float(true_image_shape[0, 1])
])
if input_fields.groundtruth_boxes in tensor_dict:
bboxes = out_tensor_dict[input_fields.groundtruth_boxes]
boxlist = box_list.BoxList(bboxes)
realigned_bboxes = box_list_ops.change_coordinate_frame(boxlist, im_box)
realigned_boxes_tensor = realigned_bboxes.get()
valid_boxes_tensor = assert_or_prune_invalid_boxes(realigned_boxes_tensor)
out_tensor_dict[
input_fields.groundtruth_boxes] = valid_boxes_tensor
if input_fields.groundtruth_keypoints in tensor_dict:
keypoints = out_tensor_dict[input_fields.groundtruth_keypoints]
realigned_keypoints = keypoint_ops.change_coordinate_frame(keypoints,
im_box)
out_tensor_dict[
input_fields.groundtruth_keypoints] = realigned_keypoints
flds_gt_kpt = input_fields.groundtruth_keypoints
flds_gt_kpt_vis = input_fields.groundtruth_keypoint_visibilities
flds_gt_kpt_weights = input_fields.groundtruth_keypoint_weights
if flds_gt_kpt_vis not in out_tensor_dict:
out_tensor_dict[flds_gt_kpt_vis] = tf.ones_like(
out_tensor_dict[flds_gt_kpt][:, :, 0],
dtype=tf.bool)
flds_gt_kpt_depth = fields.InputDataFields.groundtruth_keypoint_depths
flds_gt_kpt_depth_weight = (
fields.InputDataFields.groundtruth_keypoint_depth_weights)
if flds_gt_kpt_depth in out_tensor_dict:
out_tensor_dict[flds_gt_kpt_depth] = out_tensor_dict[flds_gt_kpt_depth]
out_tensor_dict[flds_gt_kpt_depth_weight] = out_tensor_dict[
flds_gt_kpt_depth_weight]
out_tensor_dict[flds_gt_kpt_weights] = (
keypoint_ops.keypoint_weights_from_visibilities(
out_tensor_dict[flds_gt_kpt_vis],
keypoint_type_weight))
dp_surface_coords_fld = input_fields.groundtruth_dp_surface_coords
if dp_surface_coords_fld in tensor_dict:
dp_surface_coords = out_tensor_dict[dp_surface_coords_fld]
realigned_dp_surface_coords = densepose_ops.change_coordinate_frame(
dp_surface_coords, im_box)
out_tensor_dict[dp_surface_coords_fld] = realigned_dp_surface_coords
if use_bfloat16:
preprocessed_resized_image = tf.cast(
preprocessed_resized_image, tf.bfloat16)
if input_fields.context_features in out_tensor_dict:
out_tensor_dict[input_fields.context_features] = tf.cast(
out_tensor_dict[input_fields.context_features], tf.bfloat16)
out_tensor_dict[input_fields.image] = tf.squeeze(
preprocessed_resized_image, axis=0)
out_tensor_dict[input_fields.true_image_shape] = tf.squeeze(
true_image_shape, axis=0)
if input_fields.groundtruth_instance_masks in out_tensor_dict:
masks = out_tensor_dict[input_fields.groundtruth_instance_masks]
_, resized_masks, _ = image_resizer_fn(image, masks)
if use_bfloat16:
resized_masks = tf.cast(resized_masks, tf.bfloat16)
out_tensor_dict[
input_fields.groundtruth_instance_masks] = resized_masks
zero_indexed_groundtruth_classes = out_tensor_dict[
input_fields.groundtruth_classes] - _LABEL_OFFSET
if use_multiclass_scores:
out_tensor_dict[
input_fields.groundtruth_classes] = out_tensor_dict[
input_fields.multiclass_scores]
else:
out_tensor_dict[input_fields.groundtruth_classes] = tf.one_hot(
zero_indexed_groundtruth_classes, num_classes)
out_tensor_dict.pop(input_fields.multiclass_scores, None)
if input_fields.groundtruth_confidences in out_tensor_dict:
groundtruth_confidences = out_tensor_dict[
input_fields.groundtruth_confidences]
# Map the confidences to the one-hot encoding of classes
out_tensor_dict[input_fields.groundtruth_confidences] = (
tf.reshape(groundtruth_confidences, [-1, 1]) *
out_tensor_dict[input_fields.groundtruth_classes])
else:
groundtruth_confidences = tf.ones_like(
zero_indexed_groundtruth_classes, dtype=tf.float32)
out_tensor_dict[input_fields.groundtruth_confidences] = (
out_tensor_dict[input_fields.groundtruth_classes])
if merge_multiple_boxes:
merged_boxes, merged_classes, merged_confidences, _ = (
util_ops.merge_boxes_with_multiple_labels(
out_tensor_dict[input_fields.groundtruth_boxes],
zero_indexed_groundtruth_classes,
groundtruth_confidences,
num_classes))
merged_classes = tf.cast(merged_classes, tf.float32)
out_tensor_dict[input_fields.groundtruth_boxes] = merged_boxes
out_tensor_dict[input_fields.groundtruth_classes] = merged_classes
out_tensor_dict[input_fields.groundtruth_confidences] = (
merged_confidences)
if input_fields.groundtruth_boxes in out_tensor_dict:
out_tensor_dict[input_fields.num_groundtruth_boxes] = tf.shape(
out_tensor_dict[input_fields.groundtruth_boxes])[0]
return out_tensor_dict
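# Illustrative wiring sketch (added for exposition; `detection_model`,
# `image_resizer_fn` and `num_classes` are hypothetical stand-ins, not part of
# the original module). train_input() below builds a similar callable from the
# pipeline configs before mapping it over the dataset.
def _example_transform_fn(detection_model, image_resizer_fn, num_classes):
  return functools.partial(
      transform_input_data,
      model_preprocess_fn=detection_model.preprocess,
      image_resizer_fn=image_resizer_fn,
      num_classes=num_classes,
      data_augmentation_fn=None)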
def pad_input_data_to_static_shapes(tensor_dict,
max_num_boxes,
num_classes,
spatial_image_shape=None,
max_num_context_features=None,
context_feature_length=None,
max_dp_points=336):
"""Pads input tensors to static shapes.
In case num_additional_channels > 0, we assume that the additional channels
have already been concatenated to the base image.
Args:
tensor_dict: Tensor dictionary of input data
max_num_boxes: Max number of groundtruth boxes needed to compute shapes for
padding.
num_classes: Number of classes in the dataset needed to compute shapes for
padding.
spatial_image_shape: A list of two integers of the form [height, width]
containing expected spatial shape of the image.
max_num_context_features (optional): The maximum number of context
      features needed to compute padding shapes.
context_feature_length (optional): The length of the context feature.
max_dp_points (optional): The maximum number of DensePose sampled points per
instance. The default (336) is selected since the original DensePose paper
(https://arxiv.org/pdf/1802.00434.pdf) indicates that the maximum number
of samples per part is 14, and therefore 24 * 14 = 336 is the maximum
      number of samples per instance.
Returns:
    A dictionary keyed by fields.InputDataFields containing tensors from the
    input dataset padded or clipped to static shapes.
Raises:
ValueError: If groundtruth classes is neither rank 1 nor rank 2, or if we
detect that additional channels have not been concatenated yet, or if
max_num_context_features is not specified and context_features is in the
tensor dict.
"""
if not spatial_image_shape or spatial_image_shape == [-1, -1]:
height, width = None, None
else:
height, width = spatial_image_shape # pylint: disable=unpacking-non-sequence
input_fields = fields.InputDataFields
num_additional_channels = 0
if input_fields.image_additional_channels in tensor_dict:
num_additional_channels = shape_utils.get_dim_as_int(tensor_dict[
input_fields.image_additional_channels].shape[2])
# We assume that if num_additional_channels > 0, then it has already been
# concatenated to the base image (but not the ground truth).
num_channels = 3
if input_fields.image in tensor_dict:
num_channels = shape_utils.get_dim_as_int(
tensor_dict[input_fields.image].shape[2])
if num_additional_channels:
if num_additional_channels >= num_channels:
raise ValueError(
'Image must be already concatenated with additional channels.')
if (input_fields.original_image in tensor_dict and
shape_utils.get_dim_as_int(
tensor_dict[input_fields.original_image].shape[2]) ==
num_channels):
raise ValueError(
'Image must be already concatenated with additional channels.')
if input_fields.context_features in tensor_dict and (
max_num_context_features is None):
raise ValueError('max_num_context_features must be specified in the model '
'config if include_context is specified in the input '
'config')
padding_shapes = {
input_fields.image: [height, width, num_channels],
input_fields.original_image_spatial_shape: [2],
input_fields.image_additional_channels: [
height, width, num_additional_channels
],
input_fields.source_id: [],
input_fields.filename: [],
input_fields.key: [],
input_fields.groundtruth_difficult: [max_num_boxes],
input_fields.groundtruth_boxes: [max_num_boxes, 4],
input_fields.groundtruth_classes: [max_num_boxes, num_classes],
input_fields.groundtruth_instance_masks: [
max_num_boxes, height, width
],
input_fields.groundtruth_instance_mask_weights: [max_num_boxes],
input_fields.groundtruth_is_crowd: [max_num_boxes],
input_fields.groundtruth_group_of: [max_num_boxes],
input_fields.groundtruth_area: [max_num_boxes],
input_fields.groundtruth_weights: [max_num_boxes],
input_fields.groundtruth_confidences: [
max_num_boxes, num_classes
],
input_fields.num_groundtruth_boxes: [],
input_fields.groundtruth_label_types: [max_num_boxes],
input_fields.groundtruth_label_weights: [max_num_boxes],
input_fields.true_image_shape: [3],
input_fields.groundtruth_image_classes: [num_classes],
input_fields.groundtruth_image_confidences: [num_classes],
input_fields.groundtruth_labeled_classes: [num_classes],
}
if input_fields.original_image in tensor_dict:
padding_shapes[input_fields.original_image] = [
height, width,
shape_utils.get_dim_as_int(tensor_dict[input_fields.
original_image].shape[2])
]
if input_fields.groundtruth_keypoints in tensor_dict:
tensor_shape = (
tensor_dict[input_fields.groundtruth_keypoints].shape)
padding_shape = [max_num_boxes,
shape_utils.get_dim_as_int(tensor_shape[1]),
shape_utils.get_dim_as_int(tensor_shape[2])]
padding_shapes[input_fields.groundtruth_keypoints] = padding_shape
if input_fields.groundtruth_keypoint_visibilities in tensor_dict:
tensor_shape = tensor_dict[input_fields.
groundtruth_keypoint_visibilities].shape
padding_shape = [max_num_boxes, shape_utils.get_dim_as_int(tensor_shape[1])]
padding_shapes[input_fields.
groundtruth_keypoint_visibilities] = padding_shape
if fields.InputDataFields.groundtruth_keypoint_depths in tensor_dict:
tensor_shape = tensor_dict[fields.InputDataFields.
groundtruth_keypoint_depths].shape
padding_shape = [max_num_boxes, shape_utils.get_dim_as_int(tensor_shape[1])]
padding_shapes[fields.InputDataFields.
groundtruth_keypoint_depths] = padding_shape
padding_shapes[fields.InputDataFields.
groundtruth_keypoint_depth_weights] = padding_shape
if input_fields.groundtruth_keypoint_weights in tensor_dict:
tensor_shape = (
tensor_dict[input_fields.groundtruth_keypoint_weights].shape)
padding_shape = [max_num_boxes, shape_utils.get_dim_as_int(tensor_shape[1])]
padding_shapes[input_fields.
groundtruth_keypoint_weights] = padding_shape
if input_fields.groundtruth_dp_num_points in tensor_dict:
padding_shapes[
input_fields.groundtruth_dp_num_points] = [max_num_boxes]
padding_shapes[
input_fields.groundtruth_dp_part_ids] = [
max_num_boxes, max_dp_points]
padding_shapes[
input_fields.groundtruth_dp_surface_coords] = [
max_num_boxes, max_dp_points, 4]
if input_fields.groundtruth_track_ids in tensor_dict:
padding_shapes[
input_fields.groundtruth_track_ids] = [max_num_boxes]
if input_fields.groundtruth_verified_neg_classes in tensor_dict:
padding_shapes[
input_fields.groundtruth_verified_neg_classes] = [num_classes]
if input_fields.groundtruth_not_exhaustive_classes in tensor_dict:
padding_shapes[
input_fields.groundtruth_not_exhaustive_classes] = [num_classes]
# Prepare for ContextRCNN related fields.
if input_fields.context_features in tensor_dict:
padding_shape = [max_num_context_features, context_feature_length]
padding_shapes[input_fields.context_features] = padding_shape
tensor_shape = tf.shape(
tensor_dict[fields.InputDataFields.context_features])
tensor_dict[fields.InputDataFields.valid_context_size] = tensor_shape[0]
padding_shapes[fields.InputDataFields.valid_context_size] = []
if fields.InputDataFields.context_feature_length in tensor_dict:
padding_shapes[fields.InputDataFields.context_feature_length] = []
if fields.InputDataFields.context_features_image_id_list in tensor_dict:
padding_shapes[fields.InputDataFields.context_features_image_id_list] = [
max_num_context_features]
if input_fields.is_annotated in tensor_dict:
padding_shapes[input_fields.is_annotated] = []
padded_tensor_dict = {}
for tensor_name in tensor_dict:
padded_tensor_dict[tensor_name] = shape_utils.pad_or_clip_nd(
tensor_dict[tensor_name], padding_shapes[tensor_name])
# Make sure that the number of groundtruth boxes now reflects the
# padded/clipped tensors.
if input_fields.num_groundtruth_boxes in padded_tensor_dict:
padded_tensor_dict[input_fields.num_groundtruth_boxes] = (
tf.minimum(
padded_tensor_dict[input_fields.num_groundtruth_boxes],
max_num_boxes))
return padded_tensor_dict
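# Illustrative sketch (added for exposition; the shapes and values are
# hypothetical, not part of the original module): a minimal tensor dict padded
# to static shapes with room for up to 10 boxes over 3 classes.
def _example_pad_to_static_shapes():  # pragma: no cover
  input_fields = fields.InputDataFields
  tensor_dict = {
      input_fields.image: tf.zeros([320, 320, 3], dtype=tf.float32),
      input_fields.groundtruth_boxes: tf.constant([[0., 0., 1., 1.]]),
      input_fields.groundtruth_classes: tf.one_hot([0], depth=3),
      input_fields.num_groundtruth_boxes: tf.constant(1),
  }
  padded = pad_input_data_to_static_shapes(
      tensor_dict, max_num_boxes=10, num_classes=3,
      spatial_image_shape=[320, 320])
  # padded[input_fields.groundtruth_boxes] now has static shape [10, 4].
  return padded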
def augment_input_data(tensor_dict, data_augmentation_options):
"""Applies data augmentation ops to input tensors.
Args:
tensor_dict: A dictionary of input tensors keyed by fields.InputDataFields.
data_augmentation_options: A list of tuples, where each tuple contains a
function and a dictionary that contains arguments and their values.
Usually, this is the output of core/preprocessor.build.
Returns:
A dictionary of tensors obtained by applying data augmentation ops to the
input tensor dictionary.
"""
tensor_dict[fields.InputDataFields.image] = tf.expand_dims(
tf.cast(tensor_dict[fields.InputDataFields.image], dtype=tf.float32), 0)
include_instance_masks = (fields.InputDataFields.groundtruth_instance_masks
in tensor_dict)
include_instance_mask_weights = (
fields.InputDataFields.groundtruth_instance_mask_weights in tensor_dict)
include_keypoints = (fields.InputDataFields.groundtruth_keypoints
in tensor_dict)
include_keypoint_visibilities = (
fields.InputDataFields.groundtruth_keypoint_visibilities in tensor_dict)
include_keypoint_depths = (
fields.InputDataFields.groundtruth_keypoint_depths in tensor_dict)
include_label_weights = (fields.InputDataFields.groundtruth_weights
in tensor_dict)
include_label_confidences = (fields.InputDataFields.groundtruth_confidences
in tensor_dict)
include_multiclass_scores = (fields.InputDataFields.multiclass_scores in
tensor_dict)
dense_pose_fields = [fields.InputDataFields.groundtruth_dp_num_points,
fields.InputDataFields.groundtruth_dp_part_ids,
fields.InputDataFields.groundtruth_dp_surface_coords]
include_dense_pose = all(field in tensor_dict for field in dense_pose_fields)
tensor_dict = preprocessor.preprocess(
tensor_dict, data_augmentation_options,
func_arg_map=preprocessor.get_default_func_arg_map(
include_label_weights=include_label_weights,
include_label_confidences=include_label_confidences,
include_multiclass_scores=include_multiclass_scores,
include_instance_masks=include_instance_masks,
include_instance_mask_weights=include_instance_mask_weights,
include_keypoints=include_keypoints,
include_keypoint_visibilities=include_keypoint_visibilities,
include_dense_pose=include_dense_pose,
include_keypoint_depths=include_keypoint_depths))
tensor_dict[fields.InputDataFields.image] = tf.squeeze(
tensor_dict[fields.InputDataFields.image], axis=0)
return tensor_dict
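# Illustrative sketch (added for exposition; not part of the original module):
# a single random-horizontal-flip step in the (function, kwargs) tuple format
# that augment_input_data() consumes; in the real pipeline this list is built
# by preprocessor_builder.build from the train config.
_EXAMPLE_DATA_AUGMENTATION_OPTIONS = [(preprocessor.random_horizontal_flip, {})]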
def _get_labels_dict(input_dict):
"""Extracts labels dict from input dict."""
required_label_keys = [
fields.InputDataFields.num_groundtruth_boxes,
fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_classes,
fields.InputDataFields.groundtruth_weights,
]
labels_dict = {}
for key in required_label_keys:
labels_dict[key] = input_dict[key]
optional_label_keys = [
fields.InputDataFields.groundtruth_confidences,
fields.InputDataFields.groundtruth_labeled_classes,
fields.InputDataFields.groundtruth_keypoints,
fields.InputDataFields.groundtruth_keypoint_depths,
fields.InputDataFields.groundtruth_keypoint_depth_weights,
fields.InputDataFields.groundtruth_instance_masks,
fields.InputDataFields.groundtruth_instance_mask_weights,
fields.InputDataFields.groundtruth_area,
fields.InputDataFields.groundtruth_is_crowd,
fields.InputDataFields.groundtruth_group_of,
fields.InputDataFields.groundtruth_difficult,
fields.InputDataFields.groundtruth_keypoint_visibilities,
fields.InputDataFields.groundtruth_keypoint_weights,
fields.InputDataFields.groundtruth_dp_num_points,
fields.InputDataFields.groundtruth_dp_part_ids,
fields.InputDataFields.groundtruth_dp_surface_coords,
fields.InputDataFields.groundtruth_track_ids,
fields.InputDataFields.groundtruth_verified_neg_classes,
fields.InputDataFields.groundtruth_not_exhaustive_classes
]
for key in optional_label_keys:
if key in input_dict:
labels_dict[key] = input_dict[key]
if fields.InputDataFields.groundtruth_difficult in labels_dict:
labels_dict[fields.InputDataFields.groundtruth_difficult] = tf.cast(
labels_dict[fields.InputDataFields.groundtruth_difficult], tf.int32)
return labels_dict
def _replace_empty_string_with_random_number(string_tensor):
"""Returns string unchanged if non-empty, and random string tensor otherwise.
  The random string is a random integer between 0 and 2**63 - 1, cast as a
  string.
Args:
string_tensor: A tf.tensor of dtype string.
Returns:
out_string: A tf.tensor of dtype string. If string_tensor contains the empty
string, out_string will contain a random integer casted to a string.
Otherwise string_tensor is returned unchanged.
"""
empty_string = tf.constant('', dtype=tf.string, name='EmptyString')
random_source_id = tf.as_string(
tf.random_uniform(shape=[], maxval=2**63 - 1, dtype=tf.int64))
out_string = tf.cond(
tf.equal(string_tensor, empty_string),
true_fn=lambda: random_source_id,
false_fn=lambda: string_tensor)
return out_string
def _get_features_dict(input_dict, include_source_id=False):
"""Extracts features dict from input dict."""
source_id = _replace_empty_string_with_random_number(
input_dict[fields.InputDataFields.source_id])
hash_from_source_id = tf.string_to_hash_bucket_fast(source_id, HASH_BINS)
features = {
fields.InputDataFields.image:
input_dict[fields.InputDataFields.image],
HASH_KEY: tf.cast(hash_from_source_id, tf.int32),
fields.InputDataFields.true_image_shape:
input_dict[fields.InputDataFields.true_image_shape],
fields.InputDataFields.original_image_spatial_shape:
input_dict[fields.InputDataFields.original_image_spatial_shape]
}
if include_source_id:
features[fields.InputDataFields.source_id] = source_id
if fields.InputDataFields.original_image in input_dict:
features[fields.InputDataFields.original_image] = input_dict[
fields.InputDataFields.original_image]
if fields.InputDataFields.image_additional_channels in input_dict:
features[fields.InputDataFields.image_additional_channels] = input_dict[
fields.InputDataFields.image_additional_channels]
if fields.InputDataFields.context_features in input_dict:
features[fields.InputDataFields.context_features] = input_dict[
fields.InputDataFields.context_features]
if fields.InputDataFields.valid_context_size in input_dict:
features[fields.InputDataFields.valid_context_size] = input_dict[
fields.InputDataFields.valid_context_size]
if fields.InputDataFields.context_features_image_id_list in input_dict:
features[fields.InputDataFields.context_features_image_id_list] = (
input_dict[fields.InputDataFields.context_features_image_id_list])
return features
def create_train_input_fn(train_config, train_input_config,
model_config):
"""Creates a train `input` function for `Estimator`.
Args:
train_config: A train_pb2.TrainConfig.
train_input_config: An input_reader_pb2.InputReader.
model_config: A model_pb2.DetectionModel.
Returns:
`input_fn` for `Estimator` in TRAIN mode.
"""
def _train_input_fn(params=None):
return train_input(train_config, train_input_config, model_config,
params=params)
return _train_input_fn
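# Illustrative usage sketch (added for exposition; `estimator` is assumed to be
# a pre-built tf.estimator.Estimator and is not part of the original module):
# the closure returned above is what gets handed to Estimator.train().
def _example_train_with_input_fn(estimator, train_config, train_input_config,
                                 model_config):  # pragma: no cover
  train_input_fn = create_train_input_fn(train_config, train_input_config,
                                         model_config)
  estimator.train(input_fn=train_input_fn, max_steps=train_config.num_steps)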
def train_input(train_config, train_input_config,
model_config, model=None, params=None, input_context=None):
"""Returns `features` and `labels` tensor dictionaries for training.
Args:
train_config: A train_pb2.TrainConfig.
train_input_config: An input_reader_pb2.InputReader.
model_config: A model_pb2.DetectionModel.
model: A pre-constructed Detection Model.
If None, one will be created from the config.
params: Parameter dictionary passed from the estimator.
input_context: optional, A tf.distribute.InputContext object used to
shard filenames and compute per-replica batch_size when this function
is being called per-replica.
Returns:
    A tf.data.Dataset that holds (features, labels) tuples.
features: Dictionary of feature tensors.
features[fields.InputDataFields.image] is a [batch_size, H, W, C]
float32 tensor with preprocessed images.
features[HASH_KEY] is a [batch_size] int32 tensor representing unique
identifiers for the images.
features[fields.InputDataFields.true_image_shape] is a [batch_size, 3]
int32 tensor representing the true image shapes, as preprocessed
images could be padded.
features[fields.InputDataFields.original_image] (optional) is a
[batch_size, H, W, C] float32 tensor with original images.
labels: Dictionary of groundtruth tensors.
labels[fields.InputDataFields.num_groundtruth_boxes] is a [batch_size]
int32 tensor indicating the number of groundtruth boxes.
labels[fields.InputDataFields.groundtruth_boxes] is a
[batch_size, num_boxes, 4] float32 tensor containing the corners of
the groundtruth boxes.
labels[fields.InputDataFields.groundtruth_classes] is a
[batch_size, num_boxes, num_classes] float32 one-hot tensor of
classes.
labels[fields.InputDataFields.groundtruth_weights] is a
[batch_size, num_boxes] float32 tensor containing groundtruth weights
for the boxes.
-- Optional --
labels[fields.InputDataFields.groundtruth_instance_masks] is a
[batch_size, num_boxes, H, W] float32 tensor containing only binary
values, which represent instance masks for objects.
labels[fields.InputDataFields.groundtruth_instance_mask_weights] is a
[batch_size, num_boxes] float32 tensor containing groundtruth weights
for each instance mask.
labels[fields.InputDataFields.groundtruth_keypoints] is a
[batch_size, num_boxes, num_keypoints, 2] float32 tensor containing
keypoints for each box.
      labels[fields.InputDataFields.groundtruth_keypoint_weights] is a
        [batch_size, num_boxes, num_keypoints] float32 tensor containing
        groundtruth weights for the keypoints.
      labels[fields.InputDataFields.groundtruth_keypoint_visibilities] is a
        [batch_size, num_boxes, num_keypoints] bool tensor containing
        groundtruth visibilities for each keypoint.
labels[fields.InputDataFields.groundtruth_labeled_classes] is a
[batch_size, num_classes] float32 k-hot tensor of classes.
labels[fields.InputDataFields.groundtruth_dp_num_points] is a
[batch_size, num_boxes] int32 tensor with the number of sampled
DensePose points per object.
labels[fields.InputDataFields.groundtruth_dp_part_ids] is a
[batch_size, num_boxes, max_sampled_points] int32 tensor with the
DensePose part ids (0-indexed) per object.
labels[fields.InputDataFields.groundtruth_dp_surface_coords] is a
[batch_size, num_boxes, max_sampled_points, 4] float32 tensor with the
DensePose surface coordinates. The format is (y, x, v, u), where (y, x)
are normalized image coordinates and (v, u) are normalized surface part
coordinates.
labels[fields.InputDataFields.groundtruth_track_ids] is a
[batch_size, num_boxes] int32 tensor with the track ID for each object.
Raises:
TypeError: if the `train_config`, `train_input_config` or `model_config`
are not of the correct type.
"""
if not isinstance(train_config, train_pb2.TrainConfig):
raise TypeError('For training mode, the `train_config` must be a '
'train_pb2.TrainConfig.')
if not isinstance(train_input_config, input_reader_pb2.InputReader):
    raise TypeError('The `train_input_config` must be an '
                    'input_reader_pb2.InputReader.')
if not isinstance(model_config, model_pb2.DetectionModel):
raise TypeError('The `model_config` must be a '
'model_pb2.DetectionModel.')
if model is None:
model_preprocess_fn = INPUT_BUILDER_UTIL_MAP['model_build'](
model_config, is_training=True).preprocess
else:
model_preprocess_fn = model.preprocess
num_classes = config_util.get_number_of_classes(model_config)
def transform_and_pad_input_data_fn(tensor_dict):
"""Combines transform and pad operation."""
data_augmentation_options = [
preprocessor_builder.build(step)
for step in train_config.data_augmentation_options
]
data_augmentation_fn = functools.partial(
augment_input_data,
data_augmentation_options=data_augmentation_options)
image_resizer_config = config_util.get_image_resizer_config(model_config)
image_resizer_fn = image_resizer_builder.build(image_resizer_config)
keypoint_type_weight = train_input_config.keypoint_type_weight or None
transform_data_fn = functools.partial(
transform_input_data, model_preprocess_fn=model_preprocess_fn,
image_resizer_fn=image_resizer_fn,
num_classes=num_classes,
data_augmentation_fn=data_augmentation_fn,
merge_multiple_boxes=train_config.merge_multiple_label_boxes,
retain_original_image=train_config.retain_original_images,
use_multiclass_scores=train_config.use_multiclass_scores,
use_bfloat16=train_config.use_bfloat16,
keypoint_type_weight=keypoint_type_weight)
tensor_dict = pad_input_data_to_static_shapes(
tensor_dict=transform_data_fn(tensor_dict),
max_num_boxes=train_input_config.max_number_of_boxes,
num_classes=num_classes,
spatial_image_shape=config_util.get_spatial_image_size(
image_resizer_config),
max_num_context_features=config_util.get_max_num_context_features(
model_config),
context_feature_length=config_util.get_context_feature_length(
model_config))
include_source_id = train_input_config.include_source_id
return (_get_features_dict(tensor_dict, include_source_id),
_get_labels_dict(tensor_dict))
reduce_to_frame_fn = get_reduce_to_frame_fn(train_input_config, True)
dataset = INPUT_BUILDER_UTIL_MAP['dataset_build'](
train_input_config,
transform_input_data_fn=transform_and_pad_input_data_fn,
batch_size=params['batch_size'] if params else train_config.batch_size,
input_context=input_context,
reduce_to_frame_fn=reduce_to_frame_fn)
return dataset
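# Illustrative sketch of materializing one training batch (commented out;
# assumes `configs` was loaded from a pipeline config as above):
#
#   dataset = train_input(configs['train_config'],
#                         configs['train_input_config'],
#                         configs['model'])
#   features, labels = tf.data.make_one_shot_iterator(dataset).get_next()
#   # features[fields.InputDataFields.image] -> [batch_size, H, W, C] float32.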
def create_eval_input_fn(eval_config, eval_input_config, model_config):
"""Creates an eval `input` function for `Estimator`.
Args:
eval_config: An eval_pb2.EvalConfig.
eval_input_config: An input_reader_pb2.InputReader.
model_config: A model_pb2.DetectionModel.
Returns:
`input_fn` for `Estimator` in EVAL mode.
"""
def _eval_input_fn(params=None):
return eval_input(eval_config, eval_input_config, model_config,
params=params)
return _eval_input_fn
def eval_input(eval_config, eval_input_config, model_config,
model=None, params=None, input_context=None):
"""Returns `features` and `labels` tensor dictionaries for evaluation.
Args:
eval_config: An eval_pb2.EvalConfig.
eval_input_config: An input_reader_pb2.InputReader.
model_config: A model_pb2.DetectionModel.
model: A pre-constructed Detection Model.
If None, one will be created from the config.
params: Parameter dictionary passed from the estimator.
input_context: optional, A tf.distribute.InputContext object used to
shard filenames and compute per-replica batch_size when this function
is being called per-replica.
Returns:
    A tf.data.Dataset that holds (features, labels) tuples.
features: Dictionary of feature tensors.
features[fields.InputDataFields.image] is a [1, H, W, C] float32 tensor
with preprocessed images.
features[HASH_KEY] is a [1] int32 tensor representing unique
identifiers for the images.
features[fields.InputDataFields.true_image_shape] is a [1, 3]
int32 tensor representing the true image shapes, as preprocessed
images could be padded.
features[fields.InputDataFields.original_image] is a [1, H', W', C]
float32 tensor with the original image.
labels: Dictionary of groundtruth tensors.
labels[fields.InputDataFields.groundtruth_boxes] is a [1, num_boxes, 4]
float32 tensor containing the corners of the groundtruth boxes.
labels[fields.InputDataFields.groundtruth_classes] is a
        [1, num_boxes, num_classes] float32 one-hot tensor of classes.
labels[fields.InputDataFields.groundtruth_area] is a [1, num_boxes]
float32 tensor containing object areas.
labels[fields.InputDataFields.groundtruth_is_crowd] is a [1, num_boxes]
bool tensor indicating if the boxes enclose a crowd.
labels[fields.InputDataFields.groundtruth_difficult] is a [1, num_boxes]
int32 tensor indicating if the boxes represent difficult instances.
-- Optional --
labels[fields.InputDataFields.groundtruth_instance_masks] is a
[1, num_boxes, H, W] float32 tensor containing only binary values,
which represent instance masks for objects.
labels[fields.InputDataFields.groundtruth_instance_mask_weights] is a
[1, num_boxes] float32 tensor containing groundtruth weights for each
instance mask.
      labels[fields.InputDataFields.groundtruth_keypoint_weights] is a
        [batch_size, num_boxes, num_keypoints] float32 tensor containing
        groundtruth weights for the keypoints.
      labels[fields.InputDataFields.groundtruth_keypoint_visibilities] is a
        [batch_size, num_boxes, num_keypoints] bool tensor containing
        groundtruth visibilities for each keypoint.
labels[fields.InputDataFields.groundtruth_group_of] is a [1, num_boxes]
bool tensor indicating if the box covers more than 5 instances of the
same class which heavily occlude each other.
labels[fields.InputDataFields.groundtruth_labeled_classes] is a
        [1, num_classes] float32 k-hot tensor of classes.
labels[fields.InputDataFields.groundtruth_dp_num_points] is a
[batch_size, num_boxes] int32 tensor with the number of sampled
DensePose points per object.
labels[fields.InputDataFields.groundtruth_dp_part_ids] is a
[batch_size, num_boxes, max_sampled_points] int32 tensor with the
DensePose part ids (0-indexed) per object.
labels[fields.InputDataFields.groundtruth_dp_surface_coords] is a
[batch_size, num_boxes, max_sampled_points, 4] float32 tensor with the
DensePose surface coordinates. The format is (y, x, v, u), where (y, x)
are normalized image coordinates and (v, u) are normalized surface part
coordinates.
labels[fields.InputDataFields.groundtruth_track_ids] is a
[batch_size, num_boxes] int32 tensor with the track ID for each object.
Raises:
TypeError: if the `eval_config`, `eval_input_config` or `model_config`
are not of the correct type.
"""
params = params or {}
if not isinstance(eval_config, eval_pb2.EvalConfig):
    raise TypeError('For eval mode, the `eval_config` must be an '
                    'eval_pb2.EvalConfig.')
if not isinstance(eval_input_config, input_reader_pb2.InputReader):
    raise TypeError('The `eval_input_config` must be an '
                    'input_reader_pb2.InputReader.')
if not isinstance(model_config, model_pb2.DetectionModel):
raise TypeError('The `model_config` must be a '
'model_pb2.DetectionModel.')
if eval_config.force_no_resize:
arch = model_config.WhichOneof('model')
arch_config = getattr(model_config, arch)
image_resizer_proto = image_resizer_pb2.ImageResizer()
image_resizer_proto.identity_resizer.CopyFrom(
image_resizer_pb2.IdentityResizer())
arch_config.image_resizer.CopyFrom(image_resizer_proto)
if model is None:
model_preprocess_fn = INPUT_BUILDER_UTIL_MAP['model_build'](
model_config, is_training=False).preprocess
else:
model_preprocess_fn = model.preprocess
def transform_and_pad_input_data_fn(tensor_dict):
"""Combines transform and pad operation."""
num_classes = config_util.get_number_of_classes(model_config)
image_resizer_config = config_util.get_image_resizer_config(model_config)
image_resizer_fn = image_resizer_builder.build(image_resizer_config)
keypoint_type_weight = eval_input_config.keypoint_type_weight or None
transform_data_fn = functools.partial(
transform_input_data, model_preprocess_fn=model_preprocess_fn,
image_resizer_fn=image_resizer_fn,
num_classes=num_classes,
data_augmentation_fn=None,
retain_original_image=eval_config.retain_original_images,
retain_original_image_additional_channels=
eval_config.retain_original_image_additional_channels,
keypoint_type_weight=keypoint_type_weight)
tensor_dict = pad_input_data_to_static_shapes(
tensor_dict=transform_data_fn(tensor_dict),
max_num_boxes=eval_input_config.max_number_of_boxes,
num_classes=config_util.get_number_of_classes(model_config),
spatial_image_shape=config_util.get_spatial_image_size(
image_resizer_config),
max_num_context_features=config_util.get_max_num_context_features(
model_config),
context_feature_length=config_util.get_context_feature_length(
model_config))
include_source_id = eval_input_config.include_source_id
return (_get_features_dict(tensor_dict, include_source_id),
_get_labels_dict(tensor_dict))
reduce_to_frame_fn = get_reduce_to_frame_fn(eval_input_config, False)
dataset = INPUT_BUILDER_UTIL_MAP['dataset_build'](
eval_input_config,
batch_size=params['batch_size'] if params else eval_config.batch_size,
transform_input_data_fn=transform_and_pad_input_data_fn,
input_context=input_context,
reduce_to_frame_fn=reduce_to_frame_fn)
return dataset
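# Illustrative sketch (commented out; mirrors the train_input example above,
# with the batch size taken from eval_config.batch_size unless `params`
# overrides it):
#
#   eval_dataset = eval_input(configs['eval_config'],
#                             configs['eval_input_configs'][0],
#                             configs['model'])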
def create_predict_input_fn(model_config, predict_input_config):
"""Creates a predict `input` function for `Estimator`.
Args:
model_config: A model_pb2.DetectionModel.
predict_input_config: An input_reader_pb2.InputReader.
Returns:
`input_fn` for `Estimator` in PREDICT mode.
"""
def _predict_input_fn(params=None):
"""Decodes serialized tf.Examples and returns `ServingInputReceiver`.
Args:
params: Parameter dictionary passed from the estimator.
Returns:
`ServingInputReceiver`.
"""
del params
example = tf.placeholder(dtype=tf.string, shape=[], name='tf_example')
num_classes = config_util.get_number_of_classes(model_config)
model_preprocess_fn = INPUT_BUILDER_UTIL_MAP['model_build'](
model_config, is_training=False).preprocess
image_resizer_config = config_util.get_image_resizer_config(model_config)
image_resizer_fn = image_resizer_builder.build(image_resizer_config)
transform_fn = functools.partial(
transform_input_data, model_preprocess_fn=model_preprocess_fn,
image_resizer_fn=image_resizer_fn,
num_classes=num_classes,
data_augmentation_fn=None)
decoder = tf_example_decoder.TfExampleDecoder(
load_instance_masks=False,
num_additional_channels=predict_input_config.num_additional_channels)
input_dict = transform_fn(decoder.decode(example))
images = tf.cast(input_dict[fields.InputDataFields.image], dtype=tf.float32)
images = tf.expand_dims(images, axis=0)
true_image_shape = tf.expand_dims(
input_dict[fields.InputDataFields.true_image_shape], axis=0)
return tf.estimator.export.ServingInputReceiver(
features={
fields.InputDataFields.image: images,
fields.InputDataFields.true_image_shape: true_image_shape},
receiver_tensors={SERVING_FED_EXAMPLE_KEY: example})
return _predict_input_fn
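# Illustrative sketch of exporting with the returned serving input function
# (commented out; the export call and path are assumptions and depend on the
# Estimator version in use):
#
#   serving_input_fn = create_predict_input_fn(
#       model_config=configs['model'],
#       predict_input_config=configs['eval_input_configs'][0])
#   estimator.export_saved_model('/tmp/export', serving_input_fn)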
def get_reduce_to_frame_fn(input_reader_config, is_training):
"""Returns a function reducing sequence tensors to single frame tensors.
  If the input type is not TF_SEQUENCE_EXAMPLE, the returned function passes the
  dataset through unchanged. Otherwise, when in training mode, a single frame is
selected at random from the sequence example, and the tensors for that frame
are converted to single frame tensors, with all associated context features.
In evaluation mode all frames are converted to single frame tensors with
copied context tensors. After the sequence example tensors are converted into
one or many single frame tensors, the images from each frame are decoded.
Args:
input_reader_config: An input_reader_pb2.InputReader.
is_training: Whether we are in training mode.
Returns:
    `reduce_to_frame_fn` for the dataset builder.
"""
if input_reader_config.input_type != (
input_reader_pb2.InputType.Value('TF_SEQUENCE_EXAMPLE')):
return lambda dataset, dataset_map_fn, batch_size, config: dataset
else:
def reduce_to_frame(dataset, dataset_map_fn, batch_size,
input_reader_config):
"""Returns a function reducing sequence tensors to single frame tensors.
Args:
dataset: A tf dataset containing sequence tensors.
        dataset_map_fn: A function that handles whether to
          map_with_legacy_function for this dataset.
        batch_size: Used if map_with_legacy_function is true to determine
          num_parallel_calls.
        input_reader_config: Used if map_with_legacy_function is true to
          determine num_parallel_calls.
Returns:
A tf dataset containing single frame tensors.
"""
if is_training:
def get_single_frame(tensor_dict):
"""Returns a random frame from a sequence.
Picks a random frame and returns slices of sequence tensors
corresponding to the random frame. Returns non-sequence tensors
unchanged.
Args:
tensor_dict: A dictionary containing sequence tensors.
Returns:
Tensors for a single random frame within the sequence.
"""
num_frames = tf.cast(
tf.shape(tensor_dict[fields.InputDataFields.source_id])[0],
dtype=tf.int32)
if input_reader_config.frame_index == -1:
frame_index = tf.random.uniform((), minval=0, maxval=num_frames,
dtype=tf.int32)
else:
frame_index = tf.constant(input_reader_config.frame_index,
dtype=tf.int32)
out_tensor_dict = {}
for key in tensor_dict:
if key in fields.SEQUENCE_FIELDS:
# Slice random frame from sequence tensors
out_tensor_dict[key] = tensor_dict[key][frame_index]
else:
# Copy all context tensors.
out_tensor_dict[key] = tensor_dict[key]
return out_tensor_dict
dataset = dataset_map_fn(dataset, get_single_frame, batch_size,
input_reader_config)
else:
dataset = dataset_map_fn(dataset, util_ops.tile_context_tensors,
batch_size, input_reader_config)
dataset = dataset.unbatch()
# Decode frame here as SequenceExample tensors contain encoded images.
dataset = dataset_map_fn(dataset, util_ops.decode_image, batch_size,
input_reader_config)
return dataset
return reduce_to_frame
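# Illustrative sketch of the returned callable's contract (commented out; for
# non-sequence input types it simply returns the dataset unchanged):
#
#   reduce_fn = get_reduce_to_frame_fn(input_reader_config, is_training=True)
#   dataset = reduce_fn(dataset, dataset_map_fn, batch_size, input_reader_config)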
# --- End of object_detection/inputs.py (package: 123-object-detection) ---
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Constructs model, inputs, and training environment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import functools
import os
import tensorflow.compat.v1 as tf
import tensorflow.compat.v2 as tf2
import tf_slim as slim
from object_detection import eval_util
from object_detection import exporter as exporter_lib
from object_detection import inputs
from object_detection.builders import graph_rewriter_builder
from object_detection.builders import model_builder
from object_detection.builders import optimizer_builder
from object_detection.core import standard_fields as fields
from object_detection.utils import config_util
from object_detection.utils import label_map_util
from object_detection.utils import ops
from object_detection.utils import shape_utils
from object_detection.utils import variables_helper
from object_detection.utils import visualization_utils as vis_utils
# pylint: disable=g-import-not-at-top
try:
from tensorflow.contrib import learn as contrib_learn
except ImportError:
# TF 2.0 doesn't ship with contrib.
pass
# pylint: enable=g-import-not-at-top
# A map of names to methods that help build the model.
MODEL_BUILD_UTIL_MAP = {
'get_configs_from_pipeline_file':
config_util.get_configs_from_pipeline_file,
'create_pipeline_proto_from_configs':
config_util.create_pipeline_proto_from_configs,
'merge_external_params_with_configs':
config_util.merge_external_params_with_configs,
'create_train_input_fn':
inputs.create_train_input_fn,
'create_eval_input_fn':
inputs.create_eval_input_fn,
'create_predict_input_fn':
inputs.create_predict_input_fn,
'detection_model_fn_base': model_builder.build,
}
def _prepare_groundtruth_for_eval(detection_model, class_agnostic,
max_number_of_boxes):
"""Extracts groundtruth data from detection_model and prepares it for eval.
Args:
detection_model: A `DetectionModel` object.
class_agnostic: Whether the detections are class_agnostic.
max_number_of_boxes: Max number of groundtruth boxes.
Returns:
    groundtruth: A dictionary of groundtruth tensors with the following fields:
'groundtruth_boxes': [batch_size, num_boxes, 4] float32 tensor of boxes,
in normalized coordinates.
'groundtruth_classes': [batch_size, num_boxes] int64 tensor of 1-indexed
classes.
'groundtruth_masks': 4D float32 tensor of instance masks (if provided in
groundtruth)
'groundtruth_is_crowd': [batch_size, num_boxes] bool tensor indicating
is_crowd annotations (if provided in groundtruth).
'groundtruth_area': [batch_size, num_boxes] float32 tensor indicating
the area (in the original absolute coordinates) of annotations (if
provided in groundtruth).
'num_groundtruth_boxes': [batch_size] tensor containing the maximum number
        of groundtruth boxes per image.
'groundtruth_keypoints': [batch_size, num_boxes, num_keypoints, 2] float32
tensor of keypoints (if provided in groundtruth).
'groundtruth_dp_num_points_list': [batch_size, num_boxes] int32 tensor
with the number of DensePose points for each instance (if provided in
groundtruth).
'groundtruth_dp_part_ids_list': [batch_size, num_boxes,
max_sampled_points] int32 tensor with the part ids for each DensePose
sampled point (if provided in groundtruth).
'groundtruth_dp_surface_coords_list': [batch_size, num_boxes,
max_sampled_points, 4] containing the DensePose surface coordinates for
each sampled point (if provided in groundtruth).
'groundtruth_track_ids_list': [batch_size, num_boxes] int32 tensor
with track ID for each instance (if provided in groundtruth).
'groundtruth_group_of': [batch_size, num_boxes] bool tensor indicating
group_of annotations (if provided in groundtruth).
'groundtruth_labeled_classes': [batch_size, num_classes] int64
tensor of 1-indexed classes.
'groundtruth_verified_neg_classes': [batch_size, num_classes] float32
K-hot representation of 1-indexed classes which were verified as not
present in the image.
'groundtruth_not_exhaustive_classes': [batch_size, num_classes] K-hot
representation of 1-indexed classes which don't have all of their
instances marked exhaustively.
"""
input_data_fields = fields.InputDataFields()
groundtruth_boxes = tf.stack(
detection_model.groundtruth_lists(fields.BoxListFields.boxes))
groundtruth_boxes_shape = tf.shape(groundtruth_boxes)
# For class-agnostic models, groundtruth one-hot encodings collapse to all
# ones.
if class_agnostic:
groundtruth_classes_one_hot = tf.ones(
[groundtruth_boxes_shape[0], groundtruth_boxes_shape[1], 1])
else:
groundtruth_classes_one_hot = tf.stack(
detection_model.groundtruth_lists(fields.BoxListFields.classes))
label_id_offset = 1 # Applying label id offset (b/63711816)
groundtruth_classes = (
tf.argmax(groundtruth_classes_one_hot, axis=2) + label_id_offset)
groundtruth = {
input_data_fields.groundtruth_boxes: groundtruth_boxes,
input_data_fields.groundtruth_classes: groundtruth_classes
}
if detection_model.groundtruth_has_field(fields.BoxListFields.masks):
groundtruth[input_data_fields.groundtruth_instance_masks] = tf.stack(
detection_model.groundtruth_lists(fields.BoxListFields.masks))
if detection_model.groundtruth_has_field(fields.BoxListFields.is_crowd):
groundtruth[input_data_fields.groundtruth_is_crowd] = tf.stack(
detection_model.groundtruth_lists(fields.BoxListFields.is_crowd))
if detection_model.groundtruth_has_field(input_data_fields.groundtruth_area):
groundtruth[input_data_fields.groundtruth_area] = tf.stack(
detection_model.groundtruth_lists(input_data_fields.groundtruth_area))
if detection_model.groundtruth_has_field(fields.BoxListFields.keypoints):
groundtruth[input_data_fields.groundtruth_keypoints] = tf.stack(
detection_model.groundtruth_lists(fields.BoxListFields.keypoints))
if detection_model.groundtruth_has_field(
fields.BoxListFields.keypoint_depths):
groundtruth[input_data_fields.groundtruth_keypoint_depths] = tf.stack(
detection_model.groundtruth_lists(fields.BoxListFields.keypoint_depths))
groundtruth[
input_data_fields.groundtruth_keypoint_depth_weights] = tf.stack(
detection_model.groundtruth_lists(
fields.BoxListFields.keypoint_depth_weights))
if detection_model.groundtruth_has_field(
fields.BoxListFields.keypoint_visibilities):
groundtruth[input_data_fields.groundtruth_keypoint_visibilities] = tf.stack(
detection_model.groundtruth_lists(
fields.BoxListFields.keypoint_visibilities))
if detection_model.groundtruth_has_field(fields.BoxListFields.group_of):
groundtruth[input_data_fields.groundtruth_group_of] = tf.stack(
detection_model.groundtruth_lists(fields.BoxListFields.group_of))
label_id_offset_paddings = tf.constant([[0, 0], [1, 0]])
if detection_model.groundtruth_has_field(
input_data_fields.groundtruth_verified_neg_classes):
groundtruth[input_data_fields.groundtruth_verified_neg_classes] = tf.pad(
tf.stack(detection_model.groundtruth_lists(
input_data_fields.groundtruth_verified_neg_classes)),
label_id_offset_paddings)
if detection_model.groundtruth_has_field(
input_data_fields.groundtruth_not_exhaustive_classes):
groundtruth[
input_data_fields.groundtruth_not_exhaustive_classes] = tf.pad(
tf.stack(detection_model.groundtruth_lists(
input_data_fields.groundtruth_not_exhaustive_classes)),
label_id_offset_paddings)
if detection_model.groundtruth_has_field(
fields.BoxListFields.densepose_num_points):
groundtruth[input_data_fields.groundtruth_dp_num_points] = tf.stack(
detection_model.groundtruth_lists(
fields.BoxListFields.densepose_num_points))
if detection_model.groundtruth_has_field(
fields.BoxListFields.densepose_part_ids):
groundtruth[input_data_fields.groundtruth_dp_part_ids] = tf.stack(
detection_model.groundtruth_lists(
fields.BoxListFields.densepose_part_ids))
if detection_model.groundtruth_has_field(
fields.BoxListFields.densepose_surface_coords):
groundtruth[input_data_fields.groundtruth_dp_surface_coords] = tf.stack(
detection_model.groundtruth_lists(
fields.BoxListFields.densepose_surface_coords))
if detection_model.groundtruth_has_field(fields.BoxListFields.track_ids):
groundtruth[input_data_fields.groundtruth_track_ids] = tf.stack(
detection_model.groundtruth_lists(fields.BoxListFields.track_ids))
if detection_model.groundtruth_has_field(
input_data_fields.groundtruth_labeled_classes):
groundtruth[input_data_fields.groundtruth_labeled_classes] = tf.pad(
tf.stack(
detection_model.groundtruth_lists(
input_data_fields.groundtruth_labeled_classes)),
label_id_offset_paddings)
groundtruth[input_data_fields.num_groundtruth_boxes] = (
tf.tile([max_number_of_boxes], multiples=[groundtruth_boxes_shape[0]]))
return groundtruth
def unstack_batch(tensor_dict, unpad_groundtruth_tensors=True):
"""Unstacks all tensors in `tensor_dict` along 0th dimension.
Unstacks tensor from the tensor dict along 0th dimension and returns a
tensor_dict containing values that are lists of unstacked, unpadded tensors.
Tensors in the `tensor_dict` are expected to be of one of the three shapes:
1. [batch_size]
2. [batch_size, height, width, channels]
3. [batch_size, num_boxes, d1, d2, ... dn]
When unpad_groundtruth_tensors is set to true, unstacked tensors of form 3
above are sliced along the `num_boxes` dimension using the value in tensor
field.InputDataFields.num_groundtruth_boxes.
Note that this function has a static list of input data fields and has to be
kept in sync with the InputDataFields defined in core/standard_fields.py
Args:
tensor_dict: A dictionary of batched groundtruth tensors.
unpad_groundtruth_tensors: Whether to remove padding along `num_boxes`
dimension of the groundtruth tensors.
Returns:
A dictionary where the keys are from fields.InputDataFields and values are
a list of unstacked (optionally unpadded) tensors.
Raises:
ValueError: If unpad_tensors is True and `tensor_dict` does not contain
`num_groundtruth_boxes` tensor.
"""
unbatched_tensor_dict = {
key: tf.unstack(tensor) for key, tensor in tensor_dict.items()
}
if unpad_groundtruth_tensors:
if (fields.InputDataFields.num_groundtruth_boxes not in
unbatched_tensor_dict):
raise ValueError('`num_groundtruth_boxes` not found in tensor_dict. '
'Keys available: {}'.format(
unbatched_tensor_dict.keys()))
unbatched_unpadded_tensor_dict = {}
unpad_keys = set([
# List of input data fields that are padded along the num_boxes
# dimension. This list has to be kept in sync with InputDataFields in
# standard_fields.py.
fields.InputDataFields.groundtruth_instance_masks,
fields.InputDataFields.groundtruth_instance_mask_weights,
fields.InputDataFields.groundtruth_classes,
fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_keypoints,
fields.InputDataFields.groundtruth_keypoint_depths,
fields.InputDataFields.groundtruth_keypoint_depth_weights,
fields.InputDataFields.groundtruth_keypoint_visibilities,
fields.InputDataFields.groundtruth_dp_num_points,
fields.InputDataFields.groundtruth_dp_part_ids,
fields.InputDataFields.groundtruth_dp_surface_coords,
fields.InputDataFields.groundtruth_track_ids,
fields.InputDataFields.groundtruth_group_of,
fields.InputDataFields.groundtruth_difficult,
fields.InputDataFields.groundtruth_is_crowd,
fields.InputDataFields.groundtruth_area,
fields.InputDataFields.groundtruth_weights
]).intersection(set(unbatched_tensor_dict.keys()))
for key in unpad_keys:
unpadded_tensor_list = []
for num_gt, padded_tensor in zip(
unbatched_tensor_dict[fields.InputDataFields.num_groundtruth_boxes],
unbatched_tensor_dict[key]):
tensor_shape = shape_utils.combined_static_and_dynamic_shape(
padded_tensor)
slice_begin = tf.zeros([len(tensor_shape)], dtype=tf.int32)
slice_size = tf.stack(
[num_gt] + [-1 if dim is None else dim for dim in tensor_shape[1:]])
unpadded_tensor = tf.slice(padded_tensor, slice_begin, slice_size)
unpadded_tensor_list.append(unpadded_tensor)
unbatched_unpadded_tensor_dict[key] = unpadded_tensor_list
unbatched_tensor_dict.update(unbatched_unpadded_tensor_dict)
return unbatched_tensor_dict
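# Illustrative sketch of the unstack/unpad behavior with hypothetical shapes
# (commented out):
#
#   batched = {
#       fields.InputDataFields.num_groundtruth_boxes: tf.constant([1, 2]),
#       fields.InputDataFields.groundtruth_boxes:
#           tf.zeros([2, 3, 4], dtype=tf.float32),  # Padded to 3 boxes.
#   }
#   unbatched = unstack_batch(batched)
#   # unbatched[fields.InputDataFields.groundtruth_boxes] is a list of two
#   # tensors with shapes [1, 4] and [2, 4] (padding removed per image).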
def provide_groundtruth(model, labels):
"""Provides the labels to a model as groundtruth.
This helper function extracts the corresponding boxes, classes,
keypoints, weights, masks, etc. from the labels, and provides it
as groundtruth to the models.
Args:
model: The detection model to provide groundtruth to.
labels: The labels for the training or evaluation inputs.
"""
gt_boxes_list = labels[fields.InputDataFields.groundtruth_boxes]
gt_classes_list = labels[fields.InputDataFields.groundtruth_classes]
gt_masks_list = None
if fields.InputDataFields.groundtruth_instance_masks in labels:
gt_masks_list = labels[
fields.InputDataFields.groundtruth_instance_masks]
gt_mask_weights_list = None
if fields.InputDataFields.groundtruth_instance_mask_weights in labels:
gt_mask_weights_list = labels[
fields.InputDataFields.groundtruth_instance_mask_weights]
gt_keypoints_list = None
if fields.InputDataFields.groundtruth_keypoints in labels:
gt_keypoints_list = labels[fields.InputDataFields.groundtruth_keypoints]
gt_keypoint_depths_list = None
gt_keypoint_depth_weights_list = None
if fields.InputDataFields.groundtruth_keypoint_depths in labels:
gt_keypoint_depths_list = (
labels[fields.InputDataFields.groundtruth_keypoint_depths])
gt_keypoint_depth_weights_list = (
labels[fields.InputDataFields.groundtruth_keypoint_depth_weights])
gt_keypoint_visibilities_list = None
if fields.InputDataFields.groundtruth_keypoint_visibilities in labels:
gt_keypoint_visibilities_list = labels[
fields.InputDataFields.groundtruth_keypoint_visibilities]
gt_dp_num_points_list = None
if fields.InputDataFields.groundtruth_dp_num_points in labels:
gt_dp_num_points_list = labels[
fields.InputDataFields.groundtruth_dp_num_points]
gt_dp_part_ids_list = None
if fields.InputDataFields.groundtruth_dp_part_ids in labels:
gt_dp_part_ids_list = labels[
fields.InputDataFields.groundtruth_dp_part_ids]
gt_dp_surface_coords_list = None
if fields.InputDataFields.groundtruth_dp_surface_coords in labels:
gt_dp_surface_coords_list = labels[
fields.InputDataFields.groundtruth_dp_surface_coords]
gt_track_ids_list = None
if fields.InputDataFields.groundtruth_track_ids in labels:
gt_track_ids_list = labels[
fields.InputDataFields.groundtruth_track_ids]
gt_weights_list = None
if fields.InputDataFields.groundtruth_weights in labels:
gt_weights_list = labels[fields.InputDataFields.groundtruth_weights]
gt_confidences_list = None
if fields.InputDataFields.groundtruth_confidences in labels:
gt_confidences_list = labels[
fields.InputDataFields.groundtruth_confidences]
gt_is_crowd_list = None
if fields.InputDataFields.groundtruth_is_crowd in labels:
gt_is_crowd_list = labels[fields.InputDataFields.groundtruth_is_crowd]
gt_group_of_list = None
if fields.InputDataFields.groundtruth_group_of in labels:
gt_group_of_list = labels[fields.InputDataFields.groundtruth_group_of]
gt_area_list = None
if fields.InputDataFields.groundtruth_area in labels:
gt_area_list = labels[fields.InputDataFields.groundtruth_area]
gt_labeled_classes = None
if fields.InputDataFields.groundtruth_labeled_classes in labels:
gt_labeled_classes = labels[
fields.InputDataFields.groundtruth_labeled_classes]
gt_verified_neg_classes = None
if fields.InputDataFields.groundtruth_verified_neg_classes in labels:
gt_verified_neg_classes = labels[
fields.InputDataFields.groundtruth_verified_neg_classes]
gt_not_exhaustive_classes = None
if fields.InputDataFields.groundtruth_not_exhaustive_classes in labels:
gt_not_exhaustive_classes = labels[
fields.InputDataFields.groundtruth_not_exhaustive_classes]
model.provide_groundtruth(
groundtruth_boxes_list=gt_boxes_list,
groundtruth_classes_list=gt_classes_list,
groundtruth_confidences_list=gt_confidences_list,
groundtruth_labeled_classes=gt_labeled_classes,
groundtruth_masks_list=gt_masks_list,
groundtruth_mask_weights_list=gt_mask_weights_list,
groundtruth_keypoints_list=gt_keypoints_list,
groundtruth_keypoint_visibilities_list=gt_keypoint_visibilities_list,
groundtruth_dp_num_points_list=gt_dp_num_points_list,
groundtruth_dp_part_ids_list=gt_dp_part_ids_list,
groundtruth_dp_surface_coords_list=gt_dp_surface_coords_list,
groundtruth_weights_list=gt_weights_list,
groundtruth_is_crowd_list=gt_is_crowd_list,
groundtruth_group_of_list=gt_group_of_list,
groundtruth_area_list=gt_area_list,
groundtruth_track_ids_list=gt_track_ids_list,
groundtruth_verified_neg_classes=gt_verified_neg_classes,
groundtruth_not_exhaustive_classes=gt_not_exhaustive_classes,
groundtruth_keypoint_depths_list=gt_keypoint_depths_list,
groundtruth_keypoint_depth_weights_list=gt_keypoint_depth_weights_list)
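# Illustrative sketch of the typical call sequence (commented out; assumes
# `detection_model` is a DetectionModel and `labels` came from the input
# pipeline after unstack_batch):
#
#   labels = unstack_batch(labels, unpad_groundtruth_tensors=True)
#   provide_groundtruth(detection_model, labels)
#   prediction_dict = detection_model.predict(images, true_image_shapes)
#   losses_dict = detection_model.loss(prediction_dict, true_image_shapes)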
def create_model_fn(detection_model_fn, configs, hparams=None, use_tpu=False,
postprocess_on_cpu=False):
"""Creates a model function for `Estimator`.
Args:
detection_model_fn: Function that returns a `DetectionModel` instance.
configs: Dictionary of pipeline config objects.
hparams: `HParams` object.
use_tpu: Boolean indicating whether model should be constructed for
use on TPU.
    postprocess_on_cpu: When use_tpu and postprocess_on_cpu are true, postprocess
is scheduled on the host cpu.
Returns:
`model_fn` for `Estimator`.
"""
train_config = configs['train_config']
eval_input_config = configs['eval_input_config']
eval_config = configs['eval_config']
def model_fn(features, labels, mode, params=None):
"""Constructs the object detection model.
Args:
features: Dictionary of feature tensors, returned from `input_fn`.
labels: Dictionary of groundtruth tensors if mode is TRAIN or EVAL,
otherwise None.
mode: Mode key from tf.estimator.ModeKeys.
params: Parameter dictionary passed from the estimator.
Returns:
An `EstimatorSpec` that encapsulates the model and its serving
configurations.
"""
params = params or {}
total_loss, train_op, detections, export_outputs = None, None, None, None
is_training = mode == tf.estimator.ModeKeys.TRAIN
# Make sure to set the Keras learning phase. True during training,
# False for inference.
tf.keras.backend.set_learning_phase(is_training)
# Set policy for mixed-precision training with Keras-based models.
if use_tpu and train_config.use_bfloat16:
from tensorflow.python.keras.engine import base_layer_utils # pylint: disable=g-import-not-at-top
# Enable v2 behavior, as `mixed_bfloat16` is only supported in TF 2.0.
base_layer_utils.enable_v2_dtype_behavior()
tf2.keras.mixed_precision.set_global_policy('mixed_bfloat16')
detection_model = detection_model_fn(
is_training=is_training, add_summaries=(not use_tpu))
scaffold_fn = None
if mode == tf.estimator.ModeKeys.TRAIN:
labels = unstack_batch(
labels,
unpad_groundtruth_tensors=train_config.unpad_groundtruth_tensors)
elif mode == tf.estimator.ModeKeys.EVAL:
      # For evaluating on train data, it is necessary to check whether
      # groundtruth must be unpadded.
boxes_shape = (
labels[fields.InputDataFields.groundtruth_boxes].get_shape()
.as_list())
unpad_groundtruth_tensors = boxes_shape[1] is not None and not use_tpu
labels = unstack_batch(
labels, unpad_groundtruth_tensors=unpad_groundtruth_tensors)
if mode in (tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL):
provide_groundtruth(detection_model, labels)
preprocessed_images = features[fields.InputDataFields.image]
side_inputs = detection_model.get_side_inputs(features)
if use_tpu and train_config.use_bfloat16:
with tf.tpu.bfloat16_scope():
prediction_dict = detection_model.predict(
preprocessed_images,
features[fields.InputDataFields.true_image_shape], **side_inputs)
prediction_dict = ops.bfloat16_to_float32_nested(prediction_dict)
else:
prediction_dict = detection_model.predict(
preprocessed_images,
features[fields.InputDataFields.true_image_shape], **side_inputs)
def postprocess_wrapper(args):
return detection_model.postprocess(args[0], args[1])
if mode in (tf.estimator.ModeKeys.EVAL, tf.estimator.ModeKeys.PREDICT):
if use_tpu and postprocess_on_cpu:
detections = tf.tpu.outside_compilation(
postprocess_wrapper,
(prediction_dict,
features[fields.InputDataFields.true_image_shape]))
else:
detections = postprocess_wrapper((
prediction_dict,
features[fields.InputDataFields.true_image_shape]))
if mode == tf.estimator.ModeKeys.TRAIN:
load_pretrained = hparams.load_pretrained if hparams else False
if train_config.fine_tune_checkpoint and load_pretrained:
if not train_config.fine_tune_checkpoint_type:
# train_config.from_detection_checkpoint field is deprecated. For
# backward compatibility, set train_config.fine_tune_checkpoint_type
# based on train_config.from_detection_checkpoint.
if train_config.from_detection_checkpoint:
train_config.fine_tune_checkpoint_type = 'detection'
else:
train_config.fine_tune_checkpoint_type = 'classification'
asg_map = detection_model.restore_map(
fine_tune_checkpoint_type=train_config.fine_tune_checkpoint_type,
load_all_detection_checkpoint_vars=(
train_config.load_all_detection_checkpoint_vars))
available_var_map = (
variables_helper.get_variables_available_in_checkpoint(
asg_map,
train_config.fine_tune_checkpoint,
include_global_step=False))
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(train_config.fine_tune_checkpoint,
available_var_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(train_config.fine_tune_checkpoint,
available_var_map)
if mode in (tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL):
if (mode == tf.estimator.ModeKeys.EVAL and
eval_config.use_dummy_loss_in_eval):
total_loss = tf.constant(1.0)
losses_dict = {'Loss/total_loss': total_loss}
else:
losses_dict = detection_model.loss(
prediction_dict, features[fields.InputDataFields.true_image_shape])
losses = [loss_tensor for loss_tensor in losses_dict.values()]
if train_config.add_regularization_loss:
regularization_losses = detection_model.regularization_losses()
if use_tpu and train_config.use_bfloat16:
regularization_losses = ops.bfloat16_to_float32_nested(
regularization_losses)
if regularization_losses:
regularization_loss = tf.add_n(
regularization_losses, name='regularization_loss')
losses.append(regularization_loss)
losses_dict['Loss/regularization_loss'] = regularization_loss
total_loss = tf.add_n(losses, name='total_loss')
losses_dict['Loss/total_loss'] = total_loss
if 'graph_rewriter_config' in configs:
graph_rewriter_fn = graph_rewriter_builder.build(
configs['graph_rewriter_config'], is_training=is_training)
graph_rewriter_fn()
# TODO(rathodv): Stop creating optimizer summary vars in EVAL mode once we
# can write learning rate summaries on TPU without host calls.
global_step = tf.train.get_or_create_global_step()
training_optimizer, optimizer_summary_vars = optimizer_builder.build(
train_config.optimizer)
if mode == tf.estimator.ModeKeys.TRAIN:
if use_tpu:
training_optimizer = tf.tpu.CrossShardOptimizer(training_optimizer)
# Optionally freeze some layers by setting their gradients to be zero.
trainable_variables = None
include_variables = (
train_config.update_trainable_variables
if train_config.update_trainable_variables else None)
exclude_variables = (
train_config.freeze_variables
if train_config.freeze_variables else None)
trainable_variables = slim.filter_variables(
tf.trainable_variables(),
include_patterns=include_variables,
exclude_patterns=exclude_variables)
clip_gradients_value = None
if train_config.gradient_clipping_by_norm > 0:
clip_gradients_value = train_config.gradient_clipping_by_norm
if not use_tpu:
for var in optimizer_summary_vars:
tf.summary.scalar(var.op.name, var)
summaries = [] if use_tpu else None
if train_config.summarize_gradients:
summaries = ['gradients', 'gradient_norm', 'global_gradient_norm']
train_op = slim.optimizers.optimize_loss(
loss=total_loss,
global_step=global_step,
learning_rate=None,
clip_gradients=clip_gradients_value,
optimizer=training_optimizer,
update_ops=detection_model.updates(),
variables=trainable_variables,
summaries=summaries,
name='') # Preventing scope prefix on all variables.
if mode == tf.estimator.ModeKeys.PREDICT:
exported_output = exporter_lib.add_output_tensor_nodes(detections)
export_outputs = {
tf.saved_model.signature_constants.PREDICT_METHOD_NAME:
tf.estimator.export.PredictOutput(exported_output)
}
eval_metric_ops = None
scaffold = None
if mode == tf.estimator.ModeKeys.EVAL:
class_agnostic = (
fields.DetectionResultFields.detection_classes not in detections)
groundtruth = _prepare_groundtruth_for_eval(
detection_model, class_agnostic,
eval_input_config.max_number_of_boxes)
use_original_images = fields.InputDataFields.original_image in features
if use_original_images:
eval_images = features[fields.InputDataFields.original_image]
true_image_shapes = tf.slice(
features[fields.InputDataFields.true_image_shape], [0, 0], [-1, 3])
original_image_spatial_shapes = features[fields.InputDataFields
.original_image_spatial_shape]
else:
eval_images = features[fields.InputDataFields.image]
true_image_shapes = None
original_image_spatial_shapes = None
eval_dict = eval_util.result_dict_for_batched_example(
eval_images,
features[inputs.HASH_KEY],
detections,
groundtruth,
class_agnostic=class_agnostic,
scale_to_absolute=True,
original_image_spatial_shapes=original_image_spatial_shapes,
true_image_shapes=true_image_shapes)
if fields.InputDataFields.image_additional_channels in features:
eval_dict[fields.InputDataFields.image_additional_channels] = features[
fields.InputDataFields.image_additional_channels]
if class_agnostic:
category_index = label_map_util.create_class_agnostic_category_index()
else:
category_index = label_map_util.create_category_index_from_labelmap(
eval_input_config.label_map_path)
vis_metric_ops = None
if not use_tpu and use_original_images:
keypoint_edges = [
(kp.start, kp.end) for kp in eval_config.keypoint_edge]
eval_metric_op_vis = vis_utils.VisualizeSingleFrameDetections(
category_index,
max_examples_to_draw=eval_config.num_visualizations,
max_boxes_to_draw=eval_config.max_num_boxes_to_visualize,
min_score_thresh=eval_config.min_score_threshold,
use_normalized_coordinates=False,
keypoint_edges=keypoint_edges or None)
vis_metric_ops = eval_metric_op_vis.get_estimator_eval_metric_ops(
eval_dict)
# Eval metrics on a single example.
eval_metric_ops = eval_util.get_eval_metric_ops_for_evaluators(
eval_config, list(category_index.values()), eval_dict)
for loss_key, loss_tensor in iter(losses_dict.items()):
eval_metric_ops[loss_key] = tf.metrics.mean(loss_tensor)
for var in optimizer_summary_vars:
eval_metric_ops[var.op.name] = (var, tf.no_op())
if vis_metric_ops is not None:
eval_metric_ops.update(vis_metric_ops)
eval_metric_ops = {str(k): v for k, v in eval_metric_ops.items()}
if eval_config.use_moving_averages:
variable_averages = tf.train.ExponentialMovingAverage(0.0)
variables_to_restore = variable_averages.variables_to_restore()
keep_checkpoint_every_n_hours = (
train_config.keep_checkpoint_every_n_hours)
saver = tf.train.Saver(
variables_to_restore,
keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours)
scaffold = tf.train.Scaffold(saver=saver)
# EVAL executes on CPU, so use regular non-TPU EstimatorSpec.
if use_tpu and mode != tf.estimator.ModeKeys.EVAL:
return tf.estimator.tpu.TPUEstimatorSpec(
mode=mode,
scaffold_fn=scaffold_fn,
predictions=detections,
loss=total_loss,
train_op=train_op,
eval_metrics=eval_metric_ops,
export_outputs=export_outputs)
else:
if scaffold is None:
keep_checkpoint_every_n_hours = (
train_config.keep_checkpoint_every_n_hours)
saver = tf.train.Saver(
sharded=True,
keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours,
save_relative_paths=True)
tf.add_to_collection(tf.GraphKeys.SAVERS, saver)
scaffold = tf.train.Scaffold(saver=saver)
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=detections,
loss=total_loss,
train_op=train_op,
eval_metric_ops=eval_metric_ops,
export_outputs=export_outputs,
scaffold=scaffold)
return model_fn
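# Illustrative sketch of constructing an Estimator from this model_fn
# (commented out; assumes `configs` came from a pipeline config file and the
# model_dir is hypothetical):
#
#   detection_model_fn = functools.partial(
#       model_builder.build, model_config=configs['model'])
#   model_fn = create_model_fn(detection_model_fn, configs)
#   estimator = tf.estimator.Estimator(
#       model_fn=model_fn,
#       config=tf.estimator.RunConfig(model_dir='/tmp/model'))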
def create_estimator_and_inputs(run_config,
hparams=None,
pipeline_config_path=None,
config_override=None,
train_steps=None,
sample_1_of_n_eval_examples=1,
sample_1_of_n_eval_on_train_examples=1,
model_fn_creator=create_model_fn,
use_tpu_estimator=False,
use_tpu=False,
num_shards=1,
params=None,
override_eval_num_epochs=True,
save_final_config=False,
postprocess_on_cpu=False,
export_to_tpu=None,
**kwargs):
"""Creates `Estimator`, input functions, and steps.
Args:
run_config: A `RunConfig`.
hparams: (optional) A `HParams`.
pipeline_config_path: A path to a pipeline config file.
config_override: A pipeline_pb2.TrainEvalPipelineConfig text proto to
override the config from `pipeline_config_path`.
train_steps: Number of training steps. If None, the number of training steps
is set from the `TrainConfig` proto.
sample_1_of_n_eval_examples: Integer representing how often an eval example
should be sampled. If 1, will sample all examples.
sample_1_of_n_eval_on_train_examples: Similar to
`sample_1_of_n_eval_examples`, except controls the sampling of training
data for evaluation.
model_fn_creator: A function that creates a `model_fn` for `Estimator`.
Follows the signature:
* Args:
* `detection_model_fn`: Function that returns `DetectionModel` instance.
* `configs`: Dictionary of pipeline config objects.
* `hparams`: `HParams` object.
* Returns:
`model_fn` for `Estimator`.
use_tpu_estimator: Whether a `TPUEstimator` should be returned. If False,
an `Estimator` will be returned.
use_tpu: Boolean, whether training and evaluation should run on TPU. Only
used if `use_tpu_estimator` is True.
num_shards: Number of shards (TPU cores). Only used if `use_tpu_estimator`
is True.
params: Parameter dictionary passed from the estimator. Only used if
`use_tpu_estimator` is True.
override_eval_num_epochs: Whether to overwrite the number of epochs to 1 for
eval_input.
save_final_config: Whether to save final config (obtained after applying
overrides) to `estimator.model_dir`.
postprocess_on_cpu: When use_tpu and postprocess_on_cpu are true,
postprocess is scheduled on the host cpu.
export_to_tpu: When use_tpu and export_to_tpu are true,
`export_savedmodel()` exports a metagraph for serving on TPU besides the
one on CPU.
**kwargs: Additional keyword arguments for configuration override.
Returns:
A dictionary with the following fields:
'estimator': An `Estimator` or `TPUEstimator`.
'train_input_fn': A training input function.
'eval_input_fns': A list of all evaluation input functions.
'eval_input_names': A list of names for each evaluation input.
'eval_on_train_input_fn': An evaluation-on-train input function.
'predict_input_fn': A prediction input function.
'train_steps': Number of training steps. Either directly from input or from
configuration.
"""
get_configs_from_pipeline_file = MODEL_BUILD_UTIL_MAP[
'get_configs_from_pipeline_file']
merge_external_params_with_configs = MODEL_BUILD_UTIL_MAP[
'merge_external_params_with_configs']
create_pipeline_proto_from_configs = MODEL_BUILD_UTIL_MAP[
'create_pipeline_proto_from_configs']
create_train_input_fn = MODEL_BUILD_UTIL_MAP['create_train_input_fn']
create_eval_input_fn = MODEL_BUILD_UTIL_MAP['create_eval_input_fn']
create_predict_input_fn = MODEL_BUILD_UTIL_MAP['create_predict_input_fn']
detection_model_fn_base = MODEL_BUILD_UTIL_MAP['detection_model_fn_base']
configs = get_configs_from_pipeline_file(
pipeline_config_path, config_override=config_override)
kwargs.update({
'train_steps': train_steps,
'use_bfloat16': configs['train_config'].use_bfloat16 and use_tpu
})
if sample_1_of_n_eval_examples >= 1:
kwargs.update({
'sample_1_of_n_eval_examples': sample_1_of_n_eval_examples
})
if override_eval_num_epochs:
kwargs.update({'eval_num_epochs': 1})
tf.logging.warning(
'Forced number of epochs for all eval validations to be 1.')
configs = merge_external_params_with_configs(
configs, hparams, kwargs_dict=kwargs)
model_config = configs['model']
train_config = configs['train_config']
train_input_config = configs['train_input_config']
eval_config = configs['eval_config']
eval_input_configs = configs['eval_input_configs']
eval_on_train_input_config = copy.deepcopy(train_input_config)
eval_on_train_input_config.sample_1_of_n_examples = (
sample_1_of_n_eval_on_train_examples)
if override_eval_num_epochs and eval_on_train_input_config.num_epochs != 1:
tf.logging.warning('Expected number of evaluation epochs is 1, but '
'instead encountered `eval_on_train_input_config'
'.num_epochs` = '
'{}. Overwriting `num_epochs` to 1.'.format(
eval_on_train_input_config.num_epochs))
eval_on_train_input_config.num_epochs = 1
  # Update train_steps from the config, but only when a non-zero value is given.
if train_steps is None and train_config.num_steps != 0:
train_steps = train_config.num_steps
detection_model_fn = functools.partial(
detection_model_fn_base, model_config=model_config)
# Create the input functions for TRAIN/EVAL/PREDICT.
train_input_fn = create_train_input_fn(
train_config=train_config,
train_input_config=train_input_config,
model_config=model_config)
eval_input_fns = []
for eval_input_config in eval_input_configs:
eval_input_fns.append(
create_eval_input_fn(
eval_config=eval_config,
eval_input_config=eval_input_config,
model_config=model_config))
eval_input_names = [
eval_input_config.name for eval_input_config in eval_input_configs
]
eval_on_train_input_fn = create_eval_input_fn(
eval_config=eval_config,
eval_input_config=eval_on_train_input_config,
model_config=model_config)
predict_input_fn = create_predict_input_fn(
model_config=model_config, predict_input_config=eval_input_configs[0])
# Read export_to_tpu from hparams if not passed.
if export_to_tpu is None and hparams is not None:
export_to_tpu = hparams.get('export_to_tpu', False)
tf.logging.info('create_estimator_and_inputs: use_tpu %s, export_to_tpu %s',
use_tpu, export_to_tpu)
model_fn = model_fn_creator(detection_model_fn, configs, hparams, use_tpu,
postprocess_on_cpu)
if use_tpu_estimator:
estimator = tf.estimator.tpu.TPUEstimator(
model_fn=model_fn,
train_batch_size=train_config.batch_size,
# For each core, only batch size 1 is supported for eval.
eval_batch_size=num_shards * 1 if use_tpu else 1,
use_tpu=use_tpu,
config=run_config,
export_to_tpu=export_to_tpu,
eval_on_tpu=False, # Eval runs on CPU, so disable eval on TPU
params=params if params else {})
else:
estimator = tf.estimator.Estimator(model_fn=model_fn, config=run_config)
# Write the as-run pipeline config to disk.
if run_config.is_chief and save_final_config:
pipeline_config_final = create_pipeline_proto_from_configs(configs)
config_util.save_pipeline_config(pipeline_config_final, estimator.model_dir)
return dict(
estimator=estimator,
train_input_fn=train_input_fn,
eval_input_fns=eval_input_fns,
eval_input_names=eval_input_names,
eval_on_train_input_fn=eval_on_train_input_fn,
predict_input_fn=predict_input_fn,
train_steps=train_steps)
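# Illustrative end-to-end sketch (commented out; paths are hypothetical):
#
#   train_and_eval_dict = create_estimator_and_inputs(
#       run_config=tf.estimator.RunConfig(model_dir='/tmp/model'),
#       pipeline_config_path='/tmp/pipeline.config')
#   estimator = train_and_eval_dict['estimator']
#   train_input_fn = train_and_eval_dict['train_input_fn']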
def create_train_and_eval_specs(train_input_fn,
eval_input_fns,
eval_on_train_input_fn,
predict_input_fn,
train_steps,
eval_on_train_data=False,
final_exporter_name='Servo',
eval_spec_names=None):
"""Creates a `TrainSpec` and `EvalSpec`s.
Args:
train_input_fn: Function that produces features and labels on train data.
eval_input_fns: A list of functions that produce features and labels on eval
data.
eval_on_train_input_fn: Function that produces features and labels for
evaluation on train data.
predict_input_fn: Function that produces features for inference.
train_steps: Number of training steps.
eval_on_train_data: Whether to evaluate model on training data. Default is
False.
final_exporter_name: String name given to `FinalExporter`.
eval_spec_names: A list of string names for each `EvalSpec`.
Returns:
Tuple of `TrainSpec` and list of `EvalSpecs`. If `eval_on_train_data` is
True, the last `EvalSpec` in the list will correspond to training data. The
    remaining `EvalSpec`s in the list correspond to the evaluation data.
"""
train_spec = tf.estimator.TrainSpec(
input_fn=train_input_fn, max_steps=train_steps)
if eval_spec_names is None:
eval_spec_names = [str(i) for i in range(len(eval_input_fns))]
eval_specs = []
for index, (eval_spec_name, eval_input_fn) in enumerate(
zip(eval_spec_names, eval_input_fns)):
# Uses final_exporter_name as exporter_name for the first eval spec for
# backward compatibility.
if index == 0:
exporter_name = final_exporter_name
else:
exporter_name = '{}_{}'.format(final_exporter_name, eval_spec_name)
exporter = tf.estimator.FinalExporter(
name=exporter_name, serving_input_receiver_fn=predict_input_fn)
eval_specs.append(
tf.estimator.EvalSpec(
name=eval_spec_name,
input_fn=eval_input_fn,
steps=None,
exporters=exporter))
if eval_on_train_data:
eval_specs.append(
tf.estimator.EvalSpec(
name='eval_on_train', input_fn=eval_on_train_input_fn, steps=None))
return train_spec, eval_specs
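# Illustrative sketch (commented out): the resulting specs are typically
# consumed by tf.estimator.train_and_evaluate, as in model_main.py:
#
#   train_spec, eval_specs = create_train_and_eval_specs(
#       train_input_fn, eval_input_fns, eval_on_train_input_fn,
#       predict_input_fn, train_steps)
#   tf.estimator.train_and_evaluate(estimator, train_spec, eval_specs[0])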
def _evaluate_checkpoint(estimator,
input_fn,
checkpoint_path,
name,
max_retries=0):
"""Evaluates a checkpoint.
Args:
estimator: Estimator object to use for evaluation.
input_fn: Input function to use for evaluation.
checkpoint_path: Path of the checkpoint to evaluate.
name: Namescope for eval summary.
max_retries: Maximum number of times to retry the evaluation on encountering
a tf.errors.InvalidArgumentError. If negative, will always retry the
evaluation.
Returns:
Estimator evaluation results.
"""
  always_retry = max_retries < 0
retries = 0
while always_retry or retries <= max_retries:
try:
return estimator.evaluate(
input_fn=input_fn,
steps=None,
checkpoint_path=checkpoint_path,
name=name)
except tf.errors.InvalidArgumentError as e:
if always_retry or retries < max_retries:
tf.logging.info('Retrying checkpoint evaluation after exception: %s', e)
retries += 1
else:
raise e
def continuous_eval_generator(estimator,
model_dir,
input_fn,
train_steps,
name,
max_retries=0):
"""Perform continuous evaluation on checkpoints written to a model directory.
Args:
estimator: Estimator object to use for evaluation.
model_dir: Model directory to read checkpoints for continuous evaluation.
input_fn: Input function to use for evaluation.
train_steps: Number of training steps. This is used to infer the last
checkpoint and stop evaluation loop.
name: Namescope for eval summary.
max_retries: Maximum number of times to retry the evaluation on encountering
a tf.errors.InvalidArgumentError. If negative, will always retry the
evaluation.
Yields:
Pair of current step and eval_results.
"""
def terminate_eval():
tf.logging.info('Terminating eval after 180 seconds of no checkpoints')
return True
for ckpt in tf.train.checkpoints_iterator(
model_dir, min_interval_secs=180, timeout=None,
timeout_fn=terminate_eval):
tf.logging.info('Starting Evaluation.')
try:
eval_results = _evaluate_checkpoint(
estimator=estimator,
input_fn=input_fn,
checkpoint_path=ckpt,
name=name,
max_retries=max_retries)
tf.logging.info('Eval results: %s' % eval_results)
# Terminate eval job when final checkpoint is reached
current_step = int(os.path.basename(ckpt).split('-')[1])
yield (current_step, eval_results)
if current_step >= train_steps:
tf.logging.info(
'Evaluation finished after training step %d' % current_step)
break
except tf.errors.NotFoundError:
tf.logging.info(
'Checkpoint %s no longer exists, skipping checkpoint' % ckpt)
def continuous_eval(estimator,
model_dir,
input_fn,
train_steps,
name,
max_retries=0):
"""Performs continuous evaluation on checkpoints written to a model directory.
Args:
estimator: Estimator object to use for evaluation.
model_dir: Model directory to read checkpoints for continuous evaluation.
input_fn: Input function to use for evaluation.
train_steps: Number of training steps. This is used to infer the last
checkpoint and stop evaluation loop.
name: Name scope for the eval summary.
max_retries: Maximum number of times to retry the evaluation on encountering
a tf.errors.InvalidArgumentError. If negative, will always retry the
evaluation.
"""
for current_step, eval_results in continuous_eval_generator(
estimator, model_dir, input_fn, train_steps, name, max_retries):
tf.logging.info('Step %s, Eval results: %s', current_step, eval_results)
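# A minimal usage sketch (the model directory, step count and name below are
# placeholder values; `estimator` and `eval_input_fn` are assumed to have been
# built elsewhere):
#
#   for step, metrics in continuous_eval_generator(
#       estimator, '/tmp/model_dir', eval_input_fn,
#       train_steps=10000, name='validation_data'):
#     tf.logging.info('Evaluated step %d: %s', step, metrics)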
def populate_experiment(run_config,
hparams,
pipeline_config_path,
train_steps=None,
eval_steps=None,
model_fn_creator=create_model_fn,
**kwargs):
"""Populates an `Experiment` object.
EXPERIMENT CLASS IS DEPRECATED. Please switch to
tf.estimator.train_and_evaluate. As an example, see model_main.py.
Args:
run_config: A `RunConfig`.
hparams: A `HParams`.
pipeline_config_path: A path to a pipeline config file.
train_steps: Number of training steps. If None, the number of training steps
is set from the `TrainConfig` proto.
eval_steps: Number of evaluation steps per evaluation cycle. If None, the
number of evaluation steps is set from the `EvalConfig` proto.
model_fn_creator: A function that creates a `model_fn` for `Estimator`.
Follows the signature:
* Args:
* `detection_model_fn`: Function that returns `DetectionModel` instance.
* `configs`: Dictionary of pipeline config objects.
* `hparams`: `HParams` object.
* Returns:
`model_fn` for `Estimator`.
**kwargs: Additional keyword arguments for configuration override.
Returns:
An `Experiment` that defines all aspects of training, evaluation, and
export.
"""
tf.logging.warning('Experiment is being deprecated. Please use '
'tf.estimator.train_and_evaluate(). See model_main.py for '
'an example.')
train_and_eval_dict = create_estimator_and_inputs(
run_config,
hparams,
pipeline_config_path,
train_steps=train_steps,
eval_steps=eval_steps,
model_fn_creator=model_fn_creator,
save_final_config=True,
**kwargs)
estimator = train_and_eval_dict['estimator']
train_input_fn = train_and_eval_dict['train_input_fn']
eval_input_fns = train_and_eval_dict['eval_input_fns']
predict_input_fn = train_and_eval_dict['predict_input_fn']
train_steps = train_and_eval_dict['train_steps']
export_strategies = [
contrib_learn.utils.saved_model_export_utils.make_export_strategy(
serving_input_fn=predict_input_fn)
]
return contrib_learn.Experiment(
estimator=estimator,
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fns[0],
train_steps=train_steps,
eval_steps=None,
export_strategies=export_strategies,
eval_delay_secs=120,
)
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/model_lib.py | model_lib.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Constructs model, inputs, and training environment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import os
import pprint
import time
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection import eval_util
from object_detection import inputs
from object_detection import model_lib
from object_detection.builders import optimizer_builder
from object_detection.core import standard_fields as fields
from object_detection.protos import train_pb2
from object_detection.utils import config_util
from object_detection.utils import label_map_util
from object_detection.utils import ops
from object_detection.utils import variables_helper
from object_detection.utils import visualization_utils as vutils
MODEL_BUILD_UTIL_MAP = model_lib.MODEL_BUILD_UTIL_MAP
NUM_STEPS_PER_ITERATION = 100
RESTORE_MAP_ERROR_TEMPLATE = (
'Since we are restoring a v2 style checkpoint,'
' restore_map was expected to return a (str -> Model) mapping,'
' but we received a ({} -> {}) mapping instead.'
)
def _compute_losses_and_predictions_dicts(
model, features, labels,
add_regularization_loss=True):
"""Computes the losses dict and predictions dict for a model on inputs.
Args:
model: a DetectionModel (based on Keras).
features: Dictionary of feature tensors from the input dataset.
Should be in the format output by `inputs.train_input` and
`inputs.eval_input`.
features[fields.InputDataFields.image] is a [batch_size, H, W, C]
float32 tensor with preprocessed images.
features[HASH_KEY] is a [batch_size] int32 tensor representing unique
identifiers for the images.
features[fields.InputDataFields.true_image_shape] is a [batch_size, 3]
int32 tensor representing the true image shapes, as preprocessed
images could be padded.
features[fields.InputDataFields.original_image] (optional) is a
[batch_size, H, W, C] float32 tensor with original images.
labels: A dictionary of groundtruth tensors post-unstacking. The original
labels are of the form returned by `inputs.train_input` and
`inputs.eval_input`. The shapes may have been modified by unstacking with
`model_lib.unstack_batch`. However, the dictionary includes the following
fields.
labels[fields.InputDataFields.num_groundtruth_boxes] is an
int32 tensor indicating the number of valid groundtruth boxes
per image.
labels[fields.InputDataFields.groundtruth_boxes] is a float32 tensor
containing the corners of the groundtruth boxes.
labels[fields.InputDataFields.groundtruth_classes] is a float32
one-hot tensor of classes.
labels[fields.InputDataFields.groundtruth_weights] is a float32 tensor
containing groundtruth weights for the boxes.
-- Optional --
labels[fields.InputDataFields.groundtruth_instance_masks] is a
float32 tensor containing only binary values, which represent
instance masks for objects.
labels[fields.InputDataFields.groundtruth_instance_mask_weights] is a
float32 tensor containing weights for the instance masks.
labels[fields.InputDataFields.groundtruth_keypoints] is a
float32 tensor containing keypoints for each box.
labels[fields.InputDataFields.groundtruth_dp_num_points] is an int32
tensor with the number of sampled DensePose points per object.
labels[fields.InputDataFields.groundtruth_dp_part_ids] is an int32
tensor with the DensePose part ids (0-indexed) per object.
labels[fields.InputDataFields.groundtruth_dp_surface_coords] is a
float32 tensor with the DensePose surface coordinates.
labels[fields.InputDataFields.groundtruth_group_of] is a tf.bool tensor
containing group_of annotations.
labels[fields.InputDataFields.groundtruth_labeled_classes] is a float32
k-hot tensor of classes.
labels[fields.InputDataFields.groundtruth_track_ids] is an int32
tensor of track IDs.
labels[fields.InputDataFields.groundtruth_keypoint_depths] is a
float32 tensor containing keypoint depths information.
labels[fields.InputDataFields.groundtruth_keypoint_depth_weights] is a
float32 tensor containing the weights of the keypoint depth feature.
add_regularization_loss: Whether or not to include the model's
regularization loss in the losses dictionary.
Returns:
A tuple containing the losses dictionary (with the total loss under
the key 'Loss/total_loss'), and the predictions dictionary produced by
`model.predict`.
"""
model_lib.provide_groundtruth(model, labels)
preprocessed_images = features[fields.InputDataFields.image]
prediction_dict = model.predict(
preprocessed_images,
features[fields.InputDataFields.true_image_shape],
**model.get_side_inputs(features))
prediction_dict = ops.bfloat16_to_float32_nested(prediction_dict)
losses_dict = model.loss(
prediction_dict, features[fields.InputDataFields.true_image_shape])
losses = [loss_tensor for loss_tensor in losses_dict.values()]
if add_regularization_loss:
# TODO(kaftan): As we figure out mixed precision & bfloat 16, we may
## need to convert these regularization losses from bfloat16 to float32
## as well.
regularization_losses = model.regularization_losses()
if regularization_losses:
regularization_losses = ops.bfloat16_to_float32_nested(
regularization_losses)
regularization_loss = tf.add_n(
regularization_losses, name='regularization_loss')
losses.append(regularization_loss)
losses_dict['Loss/regularization_loss'] = regularization_loss
total_loss = tf.add_n(losses, name='total_loss')
losses_dict['Loss/total_loss'] = total_loss
return losses_dict, prediction_dict
def _ensure_model_is_built(model, input_dataset, unpad_groundtruth_tensors):
"""Ensures that model variables are all built, by running on a dummy input.
Args:
model: A DetectionModel to be built.
input_dataset: The tf.data Dataset the model is being trained on. Needed to
get the shapes for the dummy loss computation.
unpad_groundtruth_tensors: A parameter passed to unstack_batch.
"""
features, labels = iter(input_dataset).next()
@tf.function
def _dummy_computation_fn(features, labels):
model._is_training = False # pylint: disable=protected-access
tf.keras.backend.set_learning_phase(False)
labels = model_lib.unstack_batch(
labels, unpad_groundtruth_tensors=unpad_groundtruth_tensors)
return _compute_losses_and_predictions_dicts(model, features, labels)
strategy = tf.compat.v2.distribute.get_strategy()
if hasattr(tf.distribute.Strategy, 'run'):
strategy.run(
_dummy_computation_fn, args=(
features,
labels,
))
else:
strategy.experimental_run_v2(
_dummy_computation_fn, args=(
features,
labels,
))
def normalize_dict(values_dict, num_replicas):
num_replicas = tf.constant(num_replicas, dtype=tf.float32)
return {key: tf.math.divide(loss, num_replicas) for key, loss
in values_dict.items()}
def reduce_dict(strategy, reduction_dict, reduction_op):
# TODO(anjalisridhar): explore if it is safe to remove the # num_replicas
# scaling of the loss and switch this to a ReduceOp.Mean
return {
name: strategy.reduce(reduction_op, loss, axis=None)
for name, loss in reduction_dict.items()
}
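# A sketch of how the two helpers above combine inside the training loop
# (`compute_loss` and the feature/label tensors are placeholders): each
# replica scales its local losses by 1/num_replicas, so summing the
# per-replica results afterwards amounts to a cross-replica mean.
#
#   def _replica_step(features, labels):
#     losses = {'Loss/total_loss': compute_loss(features, labels)}
#     return normalize_dict(losses, strategy.num_replicas_in_sync)
#
#   per_replica = strategy.run(_replica_step, args=(features, labels))
#   mean_losses = reduce_dict(strategy, per_replica,
#                             tf.distribute.ReduceOp.SUM)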
# TODO(kaftan): Explore removing learning_rate from this method & returning
## The full losses dict instead of just total_loss, then doing all summaries
## saving in a utility method called by the outer training loop.
# TODO(kaftan): Explore adding gradient summaries
def eager_train_step(detection_model,
features,
labels,
unpad_groundtruth_tensors,
optimizer,
add_regularization_loss=True,
clip_gradients_value=None,
num_replicas=1.0):
"""Process a single training batch.
This method computes the loss for the model on a single training batch,
while tracking the gradients with a gradient tape. It then updates the
model variables with the optimizer, clipping the gradients if
clip_gradients_value is present.
This method can run eagerly or inside a tf.function.
Args:
detection_model: A DetectionModel (based on Keras) to train.
features: Dictionary of feature tensors from the input dataset.
Should be in the format output by `inputs.train_input`.
features[fields.InputDataFields.image] is a [batch_size, H, W, C]
float32 tensor with preprocessed images.
features[HASH_KEY] is a [batch_size] int32 tensor representing unique
identifiers for the images.
features[fields.InputDataFields.true_image_shape] is a [batch_size, 3]
int32 tensor representing the true image shapes, as preprocessed
images could be padded.
features[fields.InputDataFields.original_image] (optional, not used
during training) is a
[batch_size, H, W, C] float32 tensor with original images.
labels: A dictionary of groundtruth tensors. This method unstacks
these labels using model_lib.unstack_batch. The stacked labels are of
the form returned by `inputs.train_input` and `inputs.eval_input`.
labels[fields.InputDataFields.num_groundtruth_boxes] is a [batch_size]
int32 tensor indicating the number of valid groundtruth boxes
per image.
labels[fields.InputDataFields.groundtruth_boxes] is a
[batch_size, num_boxes, 4] float32 tensor containing the corners of
the groundtruth boxes.
labels[fields.InputDataFields.groundtruth_classes] is a
[batch_size, num_boxes, num_classes] float32 one-hot tensor of
classes. num_classes includes the background class.
labels[fields.InputDataFields.groundtruth_weights] is a
[batch_size, num_boxes] float32 tensor containing groundtruth weights
for the boxes.
-- Optional --
labels[fields.InputDataFields.groundtruth_instance_masks] is a
[batch_size, num_boxes, H, W] float32 tensor containing only binary
values, which represent instance masks for objects.
labels[fields.InputDataFields.groundtruth_instance_mask_weights] is a
[batch_size, num_boxes] float32 tensor containing weights for the
instance masks.
labels[fields.InputDataFields.groundtruth_keypoints] is a
[batch_size, num_boxes, num_keypoints, 2] float32 tensor containing
keypoints for each box.
labels[fields.InputDataFields.groundtruth_dp_num_points] is a
[batch_size, num_boxes] int32 tensor with the number of DensePose
sampled points per instance.
labels[fields.InputDataFields.groundtruth_dp_part_ids] is a
[batch_size, num_boxes, max_sampled_points] int32 tensor with the
part ids (0-indexed) for each instance.
labels[fields.InputDataFields.groundtruth_dp_surface_coords] is a
[batch_size, num_boxes, max_sampled_points, 4] float32 tensor with the
surface coordinates for each point. Each surface coordinate is of the
form (y, x, v, u) where (y, x) are normalized image locations and
(v, u) are part-relative normalized surface coordinates.
labels[fields.InputDataFields.groundtruth_labeled_classes] is a float32
k-hot tensor of classes.
labels[fields.InputDataFields.groundtruth_track_ids] is an int32
tensor of track IDs.
labels[fields.InputDataFields.groundtruth_keypoint_depths] is a
float32 tensor containing keypoint depths information.
labels[fields.InputDataFields.groundtruth_keypoint_depth_weights] is a
float32 tensor containing the weights of the keypoint depth feature.
unpad_groundtruth_tensors: A parameter passed to unstack_batch.
optimizer: The training optimizer that will update the variables.
add_regularization_loss: Whether or not to include the model's
regularization loss in the losses dictionary.
clip_gradients_value: If this is present, clip the gradients global norm
at this value using `tf.clip_by_global_norm`.
num_replicas: The number of replicas in the current distribution strategy.
This is used to scale the total loss so that training in a distribution
strategy works correctly.
Returns:
The total loss observed at this training step
"""
# """Execute a single training step in the TF v2 style loop."""
is_training = True
detection_model._is_training = is_training # pylint: disable=protected-access
tf.keras.backend.set_learning_phase(is_training)
labels = model_lib.unstack_batch(
labels, unpad_groundtruth_tensors=unpad_groundtruth_tensors)
with tf.GradientTape() as tape:
losses_dict, _ = _compute_losses_and_predictions_dicts(
detection_model, features, labels, add_regularization_loss)
losses_dict = normalize_dict(losses_dict, num_replicas)
trainable_variables = detection_model.trainable_variables
total_loss = losses_dict['Loss/total_loss']
gradients = tape.gradient(total_loss, trainable_variables)
if clip_gradients_value:
gradients, _ = tf.clip_by_global_norm(gradients, clip_gradients_value)
optimizer.apply_gradients(zip(gradients, trainable_variables))
return losses_dict
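# A single-replica usage sketch (the model, batch and optimizer objects are
# placeholders; `train_loop` below shows the full distributed version):
#
#   losses_dict = eager_train_step(
#       detection_model, features, labels,
#       unpad_groundtruth_tensors=True,
#       optimizer=optimizer,
#       clip_gradients_value=10.0)
#   total_loss = losses_dict['Loss/total_loss']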
def validate_tf_v2_checkpoint_restore_map(checkpoint_restore_map):
"""Ensure that given dict is a valid TF v2 style restore map.
Args:
checkpoint_restore_map: A nested dict mapping strings to
tf.keras.Model objects.
Raises:
TypeError: If the keys in checkpoint_restore_map are not strings or if
the values are not tf.Module or tf.train.Checkpoint objects.
"""
for key, value in checkpoint_restore_map.items():
if not (isinstance(key, str) and
(isinstance(value, tf.Module)
or isinstance(value, tf.train.Checkpoint))):
if isinstance(key, str) and isinstance(value, dict):
validate_tf_v2_checkpoint_restore_map(value)
else:
raise TypeError(
RESTORE_MAP_ERROR_TEMPLATE.format(key.__class__.__name__,
value.__class__.__name__))
def is_object_based_checkpoint(checkpoint_path):
"""Returns true if `checkpoint_path` points to an object-based checkpoint."""
var_names = [var[0] for var in tf.train.list_variables(checkpoint_path)]
return '_CHECKPOINTABLE_OBJECT_GRAPH' in var_names
def load_fine_tune_checkpoint(model, checkpoint_path, checkpoint_type,
checkpoint_version, run_model_on_dummy_input,
input_dataset, unpad_groundtruth_tensors):
"""Load a fine tuning classification or detection checkpoint.
To make sure the model variables are all built, this method first executes
the model by computing a dummy loss. (Models might not have built their
variables before their first execution)
It then loads an object-based classification or detection checkpoint.
This method updates the model in-place and does not return a value.
Args:
model: A DetectionModel (based on Keras) to load a fine-tuning
checkpoint for.
checkpoint_path: Directory with checkpoints file or path to checkpoint.
checkpoint_type: Whether to restore from a full detection
checkpoint (with compatible variable names) or to restore from a
classification checkpoint for initialization prior to training.
Valid values: `detection`, `classification`.
checkpoint_version: train_pb2.CheckpointVersion.V1 or V2 enum indicating
whether to load checkpoints in V1 style or V2 style. In this binary
we only support V2 style (object-based) checkpoints.
run_model_on_dummy_input: Whether to run the model on a dummy input in order
to ensure that all model variables have been built successfully before
loading the fine_tune_checkpoint.
input_dataset: The tf.data Dataset the model is being trained on. Needed
to get the shapes for the dummy loss computation.
unpad_groundtruth_tensors: A parameter passed to unstack_batch.
Raises:
IOError: if `checkpoint_path` does not point at a valid object-based
checkpoint
ValueError: if `checkpoint_version` is not train_pb2.CheckpointVersion.V2
"""
if not is_object_based_checkpoint(checkpoint_path):
raise IOError('Checkpoint is expected to be an object-based checkpoint.')
if checkpoint_version == train_pb2.CheckpointVersion.V1:
raise ValueError('Checkpoint version should be V2')
if run_model_on_dummy_input:
_ensure_model_is_built(model, input_dataset, unpad_groundtruth_tensors)
restore_from_objects_dict = model.restore_from_objects(
fine_tune_checkpoint_type=checkpoint_type)
validate_tf_v2_checkpoint_restore_map(restore_from_objects_dict)
ckpt = tf.train.Checkpoint(**restore_from_objects_dict)
ckpt.restore(
checkpoint_path).expect_partial().assert_existing_objects_matched()
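# A usage sketch (the checkpoint path and the dataset are placeholders):
#
#   load_fine_tune_checkpoint(
#       model=detection_model,
#       checkpoint_path='/tmp/ckpt/ckpt-0',
#       checkpoint_type='detection',
#       checkpoint_version=train_pb2.CheckpointVersion.V2,
#       run_model_on_dummy_input=True,
#       input_dataset=train_input,
#       unpad_groundtruth_tensors=True)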
def get_filepath(strategy, filepath):
"""Get appropriate filepath for worker.
Args:
strategy: A tf.distribute.Strategy object.
filepath: A path to where the Checkpoint object is stored.
Returns:
A temporary filepath for non-chief workers to use or the original filepath
for the chief.
"""
if strategy.extended.should_checkpoint:
return filepath
else:
# TODO(vighneshb) Replace with the public API when TF exposes it.
task_id = strategy.extended._task_id # pylint:disable=protected-access
return os.path.join(filepath, 'temp_worker_{:03d}'.format(task_id))
def clean_temporary_directories(strategy, filepath):
"""Temporary directory clean up for MultiWorker Mirrored Strategy.
This is needed for all non-chief workers.
Args:
strategy: A tf.distribute.Strategy object.
filepath: The filepath for the temporary directory.
"""
if not strategy.extended.should_checkpoint:
if tf.io.gfile.exists(filepath) and tf.io.gfile.isdir(filepath):
tf.io.gfile.rmtree(filepath)
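# A sketch of how the two helpers above are used together during multi-worker
# training (`model_dir` is a placeholder): non-chief workers write to a
# temporary per-task directory, which is deleted once training finishes.
#
#   checkpoint_dir = get_filepath(strategy, model_dir)
#   # ... training writes checkpoints to checkpoint_dir ...
#   clean_temporary_directories(strategy, checkpoint_dir)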
def train_loop(
pipeline_config_path,
model_dir,
config_override=None,
train_steps=None,
use_tpu=False,
save_final_config=False,
checkpoint_every_n=1000,
checkpoint_max_to_keep=7,
record_summaries=True,
performance_summary_exporter=None,
num_steps_per_iteration=NUM_STEPS_PER_ITERATION,
**kwargs):
"""Trains a model using eager + functions.
This method:
1. Processes the pipeline configs
2. (Optionally) saves the as-run config
3. Builds the model & optimizer
4. Gets the training input data
5. Loads a fine-tuning detection or classification checkpoint if requested
6. Loops over the train data, executing distributed training steps inside
tf.functions.
7. Checkpoints the model every `checkpoint_every_n` training steps.
8. Logs the training metrics as TensorBoard summaries.
Args:
pipeline_config_path: A path to a pipeline config file.
model_dir:
The directory to save checkpoints and summaries to.
config_override: A pipeline_pb2.TrainEvalPipelineConfig text proto to
override the config from `pipeline_config_path`.
train_steps: Number of training steps. If None, the number of training steps
is set from the `TrainConfig` proto.
use_tpu: Boolean, whether training and evaluation should run on TPU.
save_final_config: Whether to save final config (obtained after applying
overrides) to `model_dir`.
checkpoint_every_n:
Checkpoint every n training steps.
checkpoint_max_to_keep:
int, the number of most recent checkpoints to keep in the model directory.
record_summaries: Boolean, whether or not to record summaries defined by
the model or the training pipeline. This does not impact the summaries
of the loss values which are always recorded. Examples of summaries
that are controlled by this flag include:
- Image summaries of training images.
- Intermediate tensors which may be logged by meta architectures.
performance_summary_exporter: function for exporting performance metrics.
num_steps_per_iteration: int, The number of training steps to perform
in each iteration.
**kwargs: Additional keyword arguments for configuration override.
"""
## Parse the configs
get_configs_from_pipeline_file = MODEL_BUILD_UTIL_MAP[
'get_configs_from_pipeline_file']
merge_external_params_with_configs = MODEL_BUILD_UTIL_MAP[
'merge_external_params_with_configs']
create_pipeline_proto_from_configs = MODEL_BUILD_UTIL_MAP[
'create_pipeline_proto_from_configs']
steps_per_sec_list = []
configs = get_configs_from_pipeline_file(
pipeline_config_path, config_override=config_override)
kwargs.update({
'train_steps': train_steps,
'use_bfloat16': configs['train_config'].use_bfloat16 and use_tpu
})
configs = merge_external_params_with_configs(
configs, None, kwargs_dict=kwargs)
model_config = configs['model']
train_config = configs['train_config']
train_input_config = configs['train_input_config']
unpad_groundtruth_tensors = train_config.unpad_groundtruth_tensors
add_regularization_loss = train_config.add_regularization_loss
clip_gradients_value = None
if train_config.gradient_clipping_by_norm > 0:
clip_gradients_value = train_config.gradient_clipping_by_norm
# update train_steps from config but only when non-zero value is provided
if train_steps is None and train_config.num_steps != 0:
train_steps = train_config.num_steps
if kwargs['use_bfloat16']:
tf.compat.v2.keras.mixed_precision.set_global_policy('mixed_bfloat16')
if train_config.load_all_detection_checkpoint_vars:
raise ValueError('train_pb2.load_all_detection_checkpoint_vars '
'unsupported in TF2')
config_util.update_fine_tune_checkpoint_type(train_config)
fine_tune_checkpoint_type = train_config.fine_tune_checkpoint_type
fine_tune_checkpoint_version = train_config.fine_tune_checkpoint_version
# Write the as-run pipeline config to disk.
if save_final_config:
tf.logging.info('Saving pipeline config file to directory {}'.format(
model_dir))
pipeline_config_final = create_pipeline_proto_from_configs(configs)
config_util.save_pipeline_config(pipeline_config_final, model_dir)
# Build the model, optimizer, and training input
strategy = tf.compat.v2.distribute.get_strategy()
with strategy.scope():
detection_model = MODEL_BUILD_UTIL_MAP['detection_model_fn_base'](
model_config=model_config, is_training=True,
add_summaries=record_summaries)
def train_dataset_fn(input_context):
"""Callable to create train input."""
# Create the inputs.
train_input = inputs.train_input(
train_config=train_config,
train_input_config=train_input_config,
model_config=model_config,
model=detection_model,
input_context=input_context)
train_input = train_input.repeat()
return train_input
train_input = strategy.experimental_distribute_datasets_from_function(
train_dataset_fn)
global_step = tf.Variable(
0, trainable=False, dtype=tf.compat.v2.dtypes.int64, name='global_step',
aggregation=tf.compat.v2.VariableAggregation.ONLY_FIRST_REPLICA)
optimizer, (learning_rate,) = optimizer_builder.build(
train_config.optimizer, global_step=global_step)
# We run the detection_model on dummy inputs in order to ensure that the
# model and all its variables have been properly constructed. Specifically,
# this is currently necessary prior to (potentially) creating shadow copies
# of the model variables for the EMA optimizer.
if train_config.optimizer.use_moving_average:
_ensure_model_is_built(detection_model, train_input,
unpad_groundtruth_tensors)
optimizer.shadow_copy(detection_model)
if callable(learning_rate):
learning_rate_fn = learning_rate
else:
learning_rate_fn = lambda: learning_rate
## Train the model
# Get the appropriate filepath (temporary or not) based on whether the worker
# is the chief.
summary_writer_filepath = get_filepath(strategy,
os.path.join(model_dir, 'train'))
summary_writer = tf.compat.v2.summary.create_file_writer(
summary_writer_filepath)
with summary_writer.as_default():
with strategy.scope():
with tf.compat.v2.summary.record_if(
lambda: global_step % num_steps_per_iteration == 0):
# Load a fine-tuning checkpoint.
if train_config.fine_tune_checkpoint:
variables_helper.ensure_checkpoint_supported(
train_config.fine_tune_checkpoint, fine_tune_checkpoint_type,
model_dir)
load_fine_tune_checkpoint(
detection_model, train_config.fine_tune_checkpoint,
fine_tune_checkpoint_type, fine_tune_checkpoint_version,
train_config.run_fine_tune_checkpoint_dummy_computation,
train_input, unpad_groundtruth_tensors)
ckpt = tf.compat.v2.train.Checkpoint(
step=global_step, model=detection_model, optimizer=optimizer)
manager_dir = get_filepath(strategy, model_dir)
if not strategy.extended.should_checkpoint:
checkpoint_max_to_keep = 1
manager = tf.compat.v2.train.CheckpointManager(
ckpt, manager_dir, max_to_keep=checkpoint_max_to_keep)
# We use the following instead of manager.latest_checkpoint because
# manager_dir does not point to the model directory when we are running
# in a worker.
latest_checkpoint = tf.train.latest_checkpoint(model_dir)
ckpt.restore(latest_checkpoint)
def train_step_fn(features, labels):
"""Single train step."""
if record_summaries:
tf.compat.v2.summary.image(
name='train_input_images',
step=global_step,
data=features[fields.InputDataFields.image],
max_outputs=3)
losses_dict = eager_train_step(
detection_model,
features,
labels,
unpad_groundtruth_tensors,
optimizer,
add_regularization_loss=add_regularization_loss,
clip_gradients_value=clip_gradients_value,
num_replicas=strategy.num_replicas_in_sync)
global_step.assign_add(1)
return losses_dict
def _sample_and_train(strategy, train_step_fn, data_iterator):
features, labels = data_iterator.next()
if hasattr(tf.distribute.Strategy, 'run'):
per_replica_losses_dict = strategy.run(
train_step_fn, args=(features, labels))
else:
per_replica_losses_dict = (
strategy.experimental_run_v2(
train_step_fn, args=(features, labels)))
return reduce_dict(
strategy, per_replica_losses_dict, tf.distribute.ReduceOp.SUM)
@tf.function
def _dist_train_step(data_iterator):
"""A distributed train step."""
if num_steps_per_iteration > 1:
for _ in tf.range(num_steps_per_iteration - 1):
# Following suggestion on yaqs/5402607292645376
with tf.name_scope(''):
_sample_and_train(strategy, train_step_fn, data_iterator)
return _sample_and_train(strategy, train_step_fn, data_iterator)
train_input_iter = iter(train_input)
if int(global_step.value()) == 0:
manager.save()
checkpointed_step = int(global_step.value())
logged_step = global_step.value()
last_step_time = time.time()
for _ in range(global_step.value(), train_steps,
num_steps_per_iteration):
losses_dict = _dist_train_step(train_input_iter)
time_taken = time.time() - last_step_time
last_step_time = time.time()
steps_per_sec = num_steps_per_iteration * 1.0 / time_taken
tf.compat.v2.summary.scalar(
'steps_per_sec', steps_per_sec, step=global_step)
steps_per_sec_list.append(steps_per_sec)
logged_dict = losses_dict.copy()
logged_dict['learning_rate'] = learning_rate_fn()
for key, val in logged_dict.items():
tf.compat.v2.summary.scalar(key, val, step=global_step)
if global_step.value() - logged_step >= 100:
logged_dict_np = {name: value.numpy() for name, value in
logged_dict.items()}
tf.logging.info(
'Step {} per-step time {:.3f}s'.format(
global_step.value(), time_taken / num_steps_per_iteration))
tf.logging.info(pprint.pformat(logged_dict_np, width=40))
logged_step = global_step.value()
if ((int(global_step.value()) - checkpointed_step) >=
checkpoint_every_n):
manager.save()
checkpointed_step = int(global_step.value())
# Remove the checkpoint directories of the non-chief workers that
# MultiWorkerMirroredStrategy forces us to save during sync distributed
# training.
clean_temporary_directories(strategy, manager_dir)
clean_temporary_directories(strategy, summary_writer_filepath)
# TODO(pkanwar): add accuracy metrics.
if performance_summary_exporter is not None:
metrics = {
'steps_per_sec': np.mean(steps_per_sec_list),
'steps_per_sec_p50': np.median(steps_per_sec_list),
'steps_per_sec_max': max(steps_per_sec_list),
'last_batch_loss': float(losses_dict['Loss/total_loss'])
}
mixed_precision = 'bf16' if kwargs['use_bfloat16'] else 'fp32'
performance_summary_exporter(metrics, mixed_precision)
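# A usage sketch (paths and step counts are placeholders); `model_main_tf2.py`
# later in this package wires the same call up from command-line flags:
#
#   strategy = tf.compat.v2.distribute.MirroredStrategy()
#   with strategy.scope():
#     train_loop(
#         pipeline_config_path='/tmp/pipeline.config',
#         model_dir='/tmp/model_dir',
#         train_steps=25000,
#         checkpoint_every_n=1000)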
def prepare_eval_dict(detections, groundtruth, features):
"""Prepares eval dictionary containing detections and groundtruth.
Takes in `detections` from the model, `groundtruth` and `features` returned
from the eval tf.data.dataset and creates a dictionary of tensors suitable
for detection eval modules.
Args:
detections: A dictionary of tensors returned by `model.postprocess`.
groundtruth: `inputs.eval_input` returns an eval dataset of (features,
labels) tuple. `groundtruth` must be set to `labels`.
Please note that:
* fields.InputDataFields.groundtruth_classes must be 0-indexed and
in its 1-hot representation.
* fields.InputDataFields.groundtruth_verified_neg_classes must be
0-indexed and in its multi-hot representation.
* fields.InputDataFields.groundtruth_not_exhaustive_classes must be
0-indexed and in its multi-hot representation.
* fields.InputDataFields.groundtruth_labeled_classes must be
0-indexed and in its multi-hot representation.
features: `inputs.eval_input` returns an eval dataset of (features, labels)
tuple. This argument must be set to a dictionary containing the following
keys and their corresponding values from `features` --
* fields.InputDataFields.image
* fields.InputDataFields.original_image
* fields.InputDataFields.original_image_spatial_shape
* fields.InputDataFields.true_image_shape
* inputs.HASH_KEY
Returns:
eval_dict: A dictionary of tensors to pass to eval module.
class_agnostic: Whether to evaluate detection in class agnostic mode.
"""
groundtruth_boxes = groundtruth[fields.InputDataFields.groundtruth_boxes]
groundtruth_boxes_shape = tf.shape(groundtruth_boxes)
# For class-agnostic models, groundtruth one-hot encodings collapse to all
# ones.
class_agnostic = (
fields.DetectionResultFields.detection_classes not in detections)
if class_agnostic:
groundtruth_classes_one_hot = tf.ones(
[groundtruth_boxes_shape[0], groundtruth_boxes_shape[1], 1])
else:
groundtruth_classes_one_hot = groundtruth[
fields.InputDataFields.groundtruth_classes]
label_id_offset = 1 # Applying label id offset (b/63711816)
groundtruth_classes = (
tf.argmax(groundtruth_classes_one_hot, axis=2) + label_id_offset)
groundtruth[fields.InputDataFields.groundtruth_classes] = groundtruth_classes
label_id_offset_paddings = tf.constant([[0, 0], [1, 0]])
if fields.InputDataFields.groundtruth_verified_neg_classes in groundtruth:
groundtruth[
fields.InputDataFields.groundtruth_verified_neg_classes] = tf.pad(
groundtruth[
fields.InputDataFields.groundtruth_verified_neg_classes],
label_id_offset_paddings)
if fields.InputDataFields.groundtruth_not_exhaustive_classes in groundtruth:
groundtruth[
fields.InputDataFields.groundtruth_not_exhaustive_classes] = tf.pad(
groundtruth[
fields.InputDataFields.groundtruth_not_exhaustive_classes],
label_id_offset_paddings)
if fields.InputDataFields.groundtruth_labeled_classes in groundtruth:
groundtruth[fields.InputDataFields.groundtruth_labeled_classes] = tf.pad(
groundtruth[fields.InputDataFields.groundtruth_labeled_classes],
label_id_offset_paddings)
use_original_images = fields.InputDataFields.original_image in features
if use_original_images:
eval_images = features[fields.InputDataFields.original_image]
true_image_shapes = features[fields.InputDataFields.true_image_shape][:, :3]
original_image_spatial_shapes = features[
fields.InputDataFields.original_image_spatial_shape]
else:
eval_images = features[fields.InputDataFields.image]
true_image_shapes = None
original_image_spatial_shapes = None
eval_dict = eval_util.result_dict_for_batched_example(
eval_images,
features[inputs.HASH_KEY],
detections,
groundtruth,
class_agnostic=class_agnostic,
scale_to_absolute=True,
original_image_spatial_shapes=original_image_spatial_shapes,
true_image_shapes=true_image_shapes)
return eval_dict, class_agnostic
def concat_replica_results(tensor_dict):
new_tensor_dict = {}
for key, values in tensor_dict.items():
new_tensor_dict[key] = tf.concat(values, axis=0)
return new_tensor_dict
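# A sketch with hypothetical two-replica values: each value in the input dict
# is the tuple returned by `strategy.experimental_local_results`, and the
# helper concatenates those per-replica tensors along the batch axis.
#
#   local = {'detection_scores': (scores_replica_0, scores_replica_1)}
#   merged = concat_replica_results(local)
#   # merged['detection_scores'] now stacks both replicas' batches.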
def eager_eval_loop(
detection_model,
configs,
eval_dataset,
use_tpu=False,
postprocess_on_cpu=False,
global_step=None,
):
"""Evaluate the model eagerly on the evaluation dataset.
This method will compute the evaluation metrics specified in the configs on
the entire evaluation dataset, then return the metrics. It will also log
the metrics to TensorBoard.
Args:
detection_model: A DetectionModel (based on Keras) to evaluate.
configs: Object detection configs that specify the evaluators that should
be used, as well as whether regularization loss should be included and
if bfloat16 should be used on TPUs.
eval_dataset: Dataset containing evaluation data.
use_tpu: Whether a TPU is being used to execute the model for evaluation.
postprocess_on_cpu: Whether model postprocessing should happen on
the CPU when using a TPU to execute the model.
global_step: A variable containing the training step this model was trained
to. Used for logging purposes.
Returns:
A dict of evaluation metrics representing the results of this evaluation.
"""
del postprocess_on_cpu
train_config = configs['train_config']
eval_input_config = configs['eval_input_config']
eval_config = configs['eval_config']
add_regularization_loss = train_config.add_regularization_loss
is_training = False
detection_model._is_training = is_training # pylint: disable=protected-access
tf.keras.backend.set_learning_phase(is_training)
evaluator_options = eval_util.evaluator_options_from_eval_config(
eval_config)
batch_size = eval_config.batch_size
class_agnostic_category_index = (
label_map_util.create_class_agnostic_category_index())
class_agnostic_evaluators = eval_util.get_evaluators(
eval_config,
list(class_agnostic_category_index.values()),
evaluator_options)
class_aware_evaluators = None
if eval_input_config.label_map_path:
class_aware_category_index = (
label_map_util.create_category_index_from_labelmap(
eval_input_config.label_map_path))
class_aware_evaluators = eval_util.get_evaluators(
eval_config,
list(class_aware_category_index.values()),
evaluator_options)
evaluators = None
loss_metrics = {}
@tf.function
def compute_eval_dict(features, labels):
"""Compute the evaluation result on an image."""
# When evaluating on train data, it is necessary to check whether groundtruth
# must be unpadded.
boxes_shape = (
labels[fields.InputDataFields.groundtruth_boxes].get_shape().as_list())
unpad_groundtruth_tensors = (boxes_shape[1] is not None
and not use_tpu
and batch_size == 1)
groundtruth_dict = labels
labels = model_lib.unstack_batch(
labels, unpad_groundtruth_tensors=unpad_groundtruth_tensors)
losses_dict, prediction_dict = _compute_losses_and_predictions_dicts(
detection_model, features, labels, add_regularization_loss)
prediction_dict = detection_model.postprocess(
prediction_dict, features[fields.InputDataFields.true_image_shape])
eval_features = {
fields.InputDataFields.image:
features[fields.InputDataFields.image],
fields.InputDataFields.original_image:
features[fields.InputDataFields.original_image],
fields.InputDataFields.original_image_spatial_shape:
features[fields.InputDataFields.original_image_spatial_shape],
fields.InputDataFields.true_image_shape:
features[fields.InputDataFields.true_image_shape],
inputs.HASH_KEY: features[inputs.HASH_KEY],
}
return losses_dict, prediction_dict, groundtruth_dict, eval_features
agnostic_categories = label_map_util.create_class_agnostic_category_index()
per_class_categories = label_map_util.create_category_index_from_labelmap(
eval_input_config.label_map_path)
keypoint_edges = [
(kp.start, kp.end) for kp in eval_config.keypoint_edge]
strategy = tf.compat.v2.distribute.get_strategy()
for i, (features, labels) in enumerate(eval_dataset):
try:
(losses_dict, prediction_dict, groundtruth_dict,
eval_features) = strategy.run(
compute_eval_dict, args=(features, labels))
except Exception as exc: # pylint:disable=broad-except
tf.logging.info('Encountered %s exception.', exc)
tf.logging.info('A replica probably exhausted all examples. Skipping '
'pending examples on other replicas.')
break
(local_prediction_dict, local_groundtruth_dict,
local_eval_features) = tf.nest.map_structure(
strategy.experimental_local_results,
[prediction_dict, groundtruth_dict, eval_features])
local_prediction_dict = concat_replica_results(local_prediction_dict)
local_groundtruth_dict = concat_replica_results(local_groundtruth_dict)
local_eval_features = concat_replica_results(local_eval_features)
eval_dict, class_agnostic = prepare_eval_dict(local_prediction_dict,
local_groundtruth_dict,
local_eval_features)
for loss_key, loss_tensor in iter(losses_dict.items()):
losses_dict[loss_key] = strategy.reduce(tf.distribute.ReduceOp.MEAN,
loss_tensor, None)
if class_agnostic:
category_index = agnostic_categories
else:
category_index = per_class_categories
if i % 100 == 0:
tf.logging.info('Finished eval step %d', i)
use_original_images = fields.InputDataFields.original_image in features
if (use_original_images and i < eval_config.num_visualizations):
sbys_image_list = vutils.draw_side_by_side_evaluation_image(
eval_dict,
category_index=category_index,
max_boxes_to_draw=eval_config.max_num_boxes_to_visualize,
min_score_thresh=eval_config.min_score_threshold,
use_normalized_coordinates=False,
keypoint_edges=keypoint_edges or None)
for j, sbys_image in enumerate(sbys_image_list):
tf.compat.v2.summary.image(
name='eval_side_by_side_{}_{}'.format(i, j),
step=global_step,
data=sbys_image,
max_outputs=eval_config.num_visualizations)
if eval_util.has_densepose(eval_dict):
dp_image_list = vutils.draw_densepose_visualizations(
eval_dict)
for j, dp_image in enumerate(dp_image_list):
tf.compat.v2.summary.image(
name='densepose_detections_{}_{}'.format(i, j),
step=global_step,
data=dp_image,
max_outputs=eval_config.num_visualizations)
if evaluators is None:
if class_agnostic:
evaluators = class_agnostic_evaluators
else:
evaluators = class_aware_evaluators
for evaluator in evaluators:
evaluator.add_eval_dict(eval_dict)
for loss_key, loss_tensor in iter(losses_dict.items()):
if loss_key not in loss_metrics:
loss_metrics[loss_key] = []
loss_metrics[loss_key].append(loss_tensor)
eval_metrics = {}
for evaluator in evaluators:
eval_metrics.update(evaluator.evaluate())
for loss_key in loss_metrics:
eval_metrics[loss_key] = tf.reduce_mean(loss_metrics[loss_key])
eval_metrics = {str(k): v for k, v in eval_metrics.items()}
tf.logging.info('Eval metrics at step %d', global_step.numpy())
for k in eval_metrics:
tf.compat.v2.summary.scalar(k, eval_metrics[k], step=global_step)
tf.logging.info('\t+ %s: %f', k, eval_metrics[k])
return eval_metrics
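# A usage sketch (all objects are placeholders, built roughly as in
# `eval_continuously` below):
#
#   with summary_writer.as_default():
#     metrics = eager_eval_loop(
#         detection_model, configs, eval_input,
#         use_tpu=False, global_step=global_step)
#   tf.logging.info('Eval metrics: %s', metrics)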
def eval_continuously(
pipeline_config_path,
config_override=None,
train_steps=None,
sample_1_of_n_eval_examples=1,
sample_1_of_n_eval_on_train_examples=1,
use_tpu=False,
override_eval_num_epochs=True,
postprocess_on_cpu=False,
model_dir=None,
checkpoint_dir=None,
wait_interval=180,
timeout=3600,
eval_index=0,
save_final_config=False,
**kwargs):
"""Run continuous evaluation of a detection model eagerly.
This method builds the model, continuously restores it from the most
recent training checkpoint in the checkpoint directory, and evaluates it
on the evaluation data.
Args:
pipeline_config_path: A path to a pipeline config file.
config_override: A pipeline_pb2.TrainEvalPipelineConfig text proto to
override the config from `pipeline_config_path`.
train_steps: Number of training steps. If None, the number of training steps
is set from the `TrainConfig` proto.
sample_1_of_n_eval_examples: Integer representing how often an eval example
should be sampled. If 1, will sample all examples.
sample_1_of_n_eval_on_train_examples: Similar to
`sample_1_of_n_eval_examples`, except controls the sampling of training
data for evaluation.
use_tpu: Boolean, whether training and evaluation should run on TPU.
override_eval_num_epochs: Whether to overwrite the number of epochs to 1 for
eval_input.
postprocess_on_cpu: When use_tpu and postprocess_on_cpu are true,
postprocess is scheduled on the host cpu.
model_dir: Directory to output resulting evaluation summaries to.
checkpoint_dir: Directory that contains the training checkpoints.
wait_interval: The minimum number of seconds to wait before checking for a
new checkpoint.
timeout: The maximum number of seconds to wait for a checkpoint. Execution
will terminate if no new checkpoints are found after this many seconds.
eval_index: int, if given, only evaluates the dataset at the given
index. By default, evaluates the dataset at the 0th index.
save_final_config: Whether to save the pipeline config file to the model
directory.
**kwargs: Additional keyword arguments for configuration override.
"""
get_configs_from_pipeline_file = MODEL_BUILD_UTIL_MAP[
'get_configs_from_pipeline_file']
create_pipeline_proto_from_configs = MODEL_BUILD_UTIL_MAP[
'create_pipeline_proto_from_configs']
merge_external_params_with_configs = MODEL_BUILD_UTIL_MAP[
'merge_external_params_with_configs']
configs = get_configs_from_pipeline_file(
pipeline_config_path, config_override=config_override)
kwargs.update({
'sample_1_of_n_eval_examples': sample_1_of_n_eval_examples,
'use_bfloat16': configs['train_config'].use_bfloat16 and use_tpu
})
if train_steps is not None:
kwargs['train_steps'] = train_steps
if override_eval_num_epochs:
kwargs.update({'eval_num_epochs': 1})
tf.logging.warning(
'Forced number of epochs for all eval validations to be 1.')
configs = merge_external_params_with_configs(
configs, None, kwargs_dict=kwargs)
if model_dir and save_final_config:
tf.logging.info('Saving pipeline config file to directory {}'.format(
model_dir))
pipeline_config_final = create_pipeline_proto_from_configs(configs)
config_util.save_pipeline_config(pipeline_config_final, model_dir)
model_config = configs['model']
train_input_config = configs['train_input_config']
eval_config = configs['eval_config']
eval_input_configs = configs['eval_input_configs']
eval_on_train_input_config = copy.deepcopy(train_input_config)
eval_on_train_input_config.sample_1_of_n_examples = (
sample_1_of_n_eval_on_train_examples)
if override_eval_num_epochs and eval_on_train_input_config.num_epochs != 1:
tf.logging.warning('Expected number of evaluation epochs is 1, but '
'instead encountered `eval_on_train_input_config'
'.num_epochs` = '
'{}. Overwriting `num_epochs` to 1.'.format(
eval_on_train_input_config.num_epochs))
eval_on_train_input_config.num_epochs = 1
if kwargs['use_bfloat16']:
tf.compat.v2.keras.mixed_precision.set_global_policy('mixed_bfloat16')
eval_input_config = eval_input_configs[eval_index]
strategy = tf.compat.v2.distribute.get_strategy()
with strategy.scope():
detection_model = MODEL_BUILD_UTIL_MAP['detection_model_fn_base'](
model_config=model_config, is_training=True)
eval_input = strategy.experimental_distribute_dataset(
inputs.eval_input(
eval_config=eval_config,
eval_input_config=eval_input_config,
model_config=model_config,
model=detection_model))
global_step = tf.compat.v2.Variable(
0, trainable=False, dtype=tf.compat.v2.dtypes.int64)
optimizer, _ = optimizer_builder.build(
configs['train_config'].optimizer, global_step=global_step)
for latest_checkpoint in tf.train.checkpoints_iterator(
checkpoint_dir, timeout=timeout, min_interval_secs=wait_interval):
ckpt = tf.compat.v2.train.Checkpoint(
step=global_step, model=detection_model, optimizer=optimizer)
# We run the detection_model on dummy inputs in order to ensure that the
# model and all its variables have been properly constructed. Specifically,
# this is currently necessary prior to (potentially) creating shadow copies
# of the model variables for the EMA optimizer.
if eval_config.use_moving_averages:
unpad_groundtruth_tensors = (eval_config.batch_size == 1 and not use_tpu)
_ensure_model_is_built(detection_model, eval_input,
unpad_groundtruth_tensors)
optimizer.shadow_copy(detection_model)
ckpt.restore(latest_checkpoint).expect_partial()
if eval_config.use_moving_averages:
optimizer.swap_weights()
summary_writer = tf.compat.v2.summary.create_file_writer(
os.path.join(model_dir, 'eval', eval_input_config.name))
with summary_writer.as_default():
eager_eval_loop(
detection_model,
configs,
eval_input,
use_tpu=use_tpu,
postprocess_on_cpu=postprocess_on_cpu,
global_step=global_step,
)
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/model_lib_v2.py | model_lib_v2.py |
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Creates and runs TF2 object detection models.
For local training/evaluation run:
PIPELINE_CONFIG_PATH=path/to/pipeline.config
MODEL_DIR=/tmp/model_outputs
NUM_TRAIN_STEPS=10000
SAMPLE_1_OF_N_EVAL_EXAMPLES=1
python model_main_tf2.py -- \
--model_dir=$MODEL_DIR --num_train_steps=$NUM_TRAIN_STEPS \
--sample_1_of_n_eval_examples=$SAMPLE_1_OF_N_EVAL_EXAMPLES \
--pipeline_config_path=$PIPELINE_CONFIG_PATH \
--alsologtostderr
"""
from absl import flags
import tensorflow.compat.v2 as tf
from object_detection import model_lib_v2
flags.DEFINE_string('pipeline_config_path', None, 'Path to pipeline config '
'file.')
flags.DEFINE_integer('num_train_steps', None, 'Number of train steps.')
flags.DEFINE_bool('eval_on_train_data', False, 'Enable evaluating on train '
'data (only supported in distributed training).')
flags.DEFINE_integer('sample_1_of_n_eval_examples', None, 'Will sample one of '
'every n eval input examples, where n is provided.')
flags.DEFINE_integer('sample_1_of_n_eval_on_train_examples', 5, 'Will sample '
'one of every n train input examples for evaluation, '
'where n is provided. This is only used if '
'`eval_training_data` is True.')
flags.DEFINE_string(
'model_dir', None, 'Path to output model directory '
'where event and checkpoint files will be written.')
flags.DEFINE_string(
'checkpoint_dir', None, 'Path to directory holding a checkpoint. If '
'`checkpoint_dir` is provided, this binary operates in eval-only mode, '
'writing resulting metrics to `model_dir`.')
flags.DEFINE_integer('eval_timeout', 3600, 'Number of seconds to wait for an '
'evaluation checkpoint before exiting.')
flags.DEFINE_bool('use_tpu', False, 'Whether the job is executing on a TPU.')
flags.DEFINE_string(
'tpu_name',
default=None,
help='Name of the Cloud TPU for Cluster Resolvers.')
flags.DEFINE_integer(
'num_workers', 1, 'When num_workers > 1, training uses '
'MultiWorkerMirroredStrategy. When num_workers = 1 it uses '
'MirroredStrategy.')
flags.DEFINE_integer(
'checkpoint_every_n', 1000, 'Integer defining how often we checkpoint.')
flags.DEFINE_boolean('record_summaries', True,
('Whether or not to record summaries defined by the model'
' or the training pipeline. This does not impact the'
' summaries of the loss values which are always'
' recorded.'))
FLAGS = flags.FLAGS
def main(unused_argv):
flags.mark_flag_as_required('model_dir')
flags.mark_flag_as_required('pipeline_config_path')
tf.config.set_soft_device_placement(True)
if FLAGS.checkpoint_dir:
model_lib_v2.eval_continuously(
pipeline_config_path=FLAGS.pipeline_config_path,
model_dir=FLAGS.model_dir,
train_steps=FLAGS.num_train_steps,
sample_1_of_n_eval_examples=FLAGS.sample_1_of_n_eval_examples,
sample_1_of_n_eval_on_train_examples=(
FLAGS.sample_1_of_n_eval_on_train_examples),
checkpoint_dir=FLAGS.checkpoint_dir,
wait_interval=300, timeout=FLAGS.eval_timeout)
else:
if FLAGS.use_tpu:
# TPU is automatically inferred if tpu_name is None and
# we are running under cloud ai-platform.
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name)
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.experimental.TPUStrategy(resolver)
elif FLAGS.num_workers > 1:
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
else:
strategy = tf.compat.v2.distribute.MirroredStrategy()
with strategy.scope():
model_lib_v2.train_loop(
pipeline_config_path=FLAGS.pipeline_config_path,
model_dir=FLAGS.model_dir,
train_steps=FLAGS.num_train_steps,
use_tpu=FLAGS.use_tpu,
checkpoint_every_n=FLAGS.checkpoint_every_n,
record_summaries=FLAGS.record_summaries)
if __name__ == '__main__':
tf.compat.v1.app.run()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/model_main_tf2.py | model_main_tf2.py |
# Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Exports an SSD detection model to use with tf-lite.
Outputs file:
* A tflite compatible frozen graph - $output_directory/tflite_graph.pb
The exported graph has the following input and output nodes.
Inputs:
'normalized_input_image_tensor': a float32 tensor of shape
[1, height, width, 3] containing the normalized input image. Note that the
height and width must be compatible with the height and width configured in
the fixed_shape_resizer options in the pipeline config proto.
In the floating point Mobilenet model, 'normalized_image_tensor' has values
between [-1, 1). This typically means mapping each pixel (linearly) to a
value between [-1, 1]: input image values between 0 and 255 are scaled by
1/128.0 and then shifted by -1 so that the range is [-1, 1).
In the quantized Mobilenet model, 'normalized_image_tensor' has values
between [0, 255].
In general, see the `preprocess` function defined in the feature extractor class
in the object_detection/models directory.
Outputs:
If add_postprocessing_op is true: the frozen graph adds a
TFLite_Detection_PostProcess custom op node, which has four outputs:
detection_boxes: a float32 tensor of shape [1, num_boxes, 4] with box
locations
detection_classes: a float32 tensor of shape [1, num_boxes]
with class indices
detection_scores: a float32 tensor of shape [1, num_boxes]
with class scores
num_boxes: a float32 tensor of size 1 containing the number of detected boxes
else:
the graph has two outputs:
'raw_outputs/box_encodings': a float32 tensor of shape [1, num_anchors, 4]
containing the encoded box predictions.
'raw_outputs/class_predictions': a float32 tensor of shape
[1, num_anchors, num_classes] containing the class scores for each anchor
after applying score conversion.
Example Usage:
--------------
python object_detection/export_tflite_ssd_graph.py \
--pipeline_config_path path/to/ssd_mobilenet.config \
--trained_checkpoint_prefix path/to/model.ckpt \
--output_directory path/to/exported_model_directory
The expected output would be in the directory
path/to/exported_model_directory (which is created if it does not exist)
with contents:
- tflite_graph.pbtxt
- tflite_graph.pb
Config overrides (see the `config_override` flag) are text protobufs
(also of type pipeline_pb2.TrainEvalPipelineConfig) which are used to override
certain fields in the provided pipeline_config_path. These are useful for
making small changes to the inference graph that differ from the training or
eval config.
Example Usage (in which we change the NMS iou_threshold to be 0.5 and
NMS score_threshold to be 0.0):
python object_detection/export_tflite_ssd_graph.py \
--pipeline_config_path path/to/ssd_mobilenet.config \
--trained_checkpoint_prefix path/to/model.ckpt \
--output_directory path/to/exported_model_directory
--config_override " \
model{ \
ssd{ \
post_processing { \
batch_non_max_suppression { \
score_threshold: 0.0 \
iou_threshold: 0.5 \
} \
} \
} \
} \
"
"""
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection import export_tflite_ssd_graph_lib
from object_detection.protos import pipeline_pb2
flags = tf.app.flags
flags.DEFINE_string('output_directory', None, 'Path to write outputs.')
flags.DEFINE_string(
'pipeline_config_path', None,
'Path to a pipeline_pb2.TrainEvalPipelineConfig config '
'file.')
flags.DEFINE_string('trained_checkpoint_prefix', None, 'Checkpoint prefix.')
flags.DEFINE_integer('max_detections', 10,
'Maximum number of detections (boxes) to show.')
flags.DEFINE_integer('max_classes_per_detection', 1,
'Maximum number of classes to output per detection box.')
flags.DEFINE_integer(
'detections_per_class', 100,
'Number of anchors used per class in Regular Non-Max-Suppression.')
flags.DEFINE_bool('add_postprocessing_op', True,
'Add TFLite custom op for postprocessing to the graph.')
flags.DEFINE_bool(
'use_regular_nms', False,
'Flag to set postprocessing op to use Regular NMS instead of Fast NMS.')
flags.DEFINE_string(
'config_override', '', 'pipeline_pb2.TrainEvalPipelineConfig '
'text proto to override pipeline_config_path.')
FLAGS = flags.FLAGS
def main(argv):
del argv # Unused.
flags.mark_flag_as_required('output_directory')
flags.mark_flag_as_required('pipeline_config_path')
flags.mark_flag_as_required('trained_checkpoint_prefix')
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
with tf.gfile.GFile(FLAGS.pipeline_config_path, 'r') as f:
text_format.Merge(f.read(), pipeline_config)
text_format.Merge(FLAGS.config_override, pipeline_config)
export_tflite_ssd_graph_lib.export_tflite_graph(
pipeline_config, FLAGS.trained_checkpoint_prefix, FLAGS.output_directory,
FLAGS.add_postprocessing_op, FLAGS.max_detections,
FLAGS.max_classes_per_detection, use_regular_nms=FLAGS.use_regular_nms)
if __name__ == '__main__':
tf.app.run(main)
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/export_tflite_ssd_graph.py | export_tflite_ssd_graph.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Hyperparameters for the object detection model in TF.learn.
This file consolidates and documents the hyperparameters used by the model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=g-import-not-at-top
try:
from tensorflow.contrib import training as contrib_training
except ImportError:
# TF 2.0 doesn't ship with contrib.
pass
# pylint: enable=g-import-not-at-top
def create_hparams(hparams_overrides=None):
"""Returns hyperparameters, including any flag value overrides.
Args:
hparams_overrides: Optional hparams overrides, represented as a
string containing comma-separated hparam_name=value pairs.
Returns:
    The hyperparameters as a contrib_training.HParams object.
"""
hparams = contrib_training.HParams(
# Whether a fine tuning checkpoint (provided in the pipeline config)
# should be loaded for training.
load_pretrained=True)
# Override any of the preceding hyperparameter values.
if hparams_overrides:
hparams = hparams.parse(hparams_overrides)
return hparams
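# A minimal usage sketch for create_hparams() (not part of the original
# module). The override string uses the comma-separated name=value syntax
# accepted by HParams.parse; `load_pretrained` is the only hyperparameter
# defined above, so it is the only name that can be overridden.
def _example_override_load_pretrained():
  """Sketch: builds hparams with load_pretrained switched off."""
  hparams = create_hparams(hparams_overrides='load_pretrained=false')
  return hparams.load_pretrained  # expected to be False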
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/model_hparams.py | model_hparams.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object detection model library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import tempfile
import unittest
import numpy as np
import six
import tensorflow.compat.v1 as tf
import tensorflow.compat.v2 as tf2
from object_detection import exporter_lib_v2
from object_detection import inputs
from object_detection import model_lib_v2
from object_detection.core import model
from object_detection.protos import train_pb2
from object_detection.utils import config_util
from object_detection.utils import tf_version
if six.PY2:
import mock # pylint: disable=g-importing-member,g-import-not-at-top
else:
from unittest import mock # pylint: disable=g-importing-member,g-import-not-at-top
# Model for test. Current options are:
# 'ssd_mobilenet_v2_pets_keras'
MODEL_NAME_FOR_TEST = 'ssd_mobilenet_v2_pets_keras'
def _get_data_path():
"""Returns an absolute path to TFRecord file."""
return os.path.join(tf.resource_loader.get_data_files_path(), 'test_data',
'pets_examples.record')
def get_pipeline_config_path(model_name):
"""Returns path to the local pipeline config file."""
return os.path.join(tf.resource_loader.get_data_files_path(), 'samples',
'configs', model_name + '.config')
def _get_labelmap_path():
"""Returns an absolute path to label map file."""
return os.path.join(tf.resource_loader.get_data_files_path(), 'data',
'pet_label_map.pbtxt')
def _get_config_kwarg_overrides():
"""Returns overrides to the configs that insert the correct local paths."""
data_path = _get_data_path()
label_map_path = _get_labelmap_path()
return {
'train_input_path': data_path,
'eval_input_path': data_path,
'label_map_path': label_map_path,
'train_input_reader': {'batch_size': 1}
}
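# For reference, a hedged sketch (not used by the tests below) of how the
# kwarg overrides above get folded into a pipeline config via config_util;
# the same two calls appear in CheckpointV2Test.setUp further down.
def _example_apply_config_overrides(pipeline_config_path):
  """Sketch: returns pipeline configs with the local-path overrides applied."""
  configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
  return config_util.merge_external_params_with_configs(
      configs, kwargs_dict=_get_config_kwarg_overrides())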
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class ModelLibTest(tf.test.TestCase):
@classmethod
def setUpClass(cls): # pylint:disable=g-missing-super-call
tf.keras.backend.clear_session()
def test_train_loop_then_eval_loop(self):
"""Tests that Estimator and input function are constructed correctly."""
model_dir = tf.test.get_temp_dir()
pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST)
new_pipeline_config_path = os.path.join(model_dir, 'new_pipeline.config')
config_util.clear_fine_tune_checkpoint(pipeline_config_path,
new_pipeline_config_path)
config_kwarg_overrides = _get_config_kwarg_overrides()
train_steps = 2
strategy = tf2.distribute.MirroredStrategy(['/cpu:0', '/cpu:1'])
with strategy.scope():
model_lib_v2.train_loop(
new_pipeline_config_path,
model_dir=model_dir,
train_steps=train_steps,
checkpoint_every_n=1,
num_steps_per_iteration=1,
**config_kwarg_overrides)
model_lib_v2.eval_continuously(
new_pipeline_config_path,
model_dir=model_dir,
checkpoint_dir=model_dir,
train_steps=train_steps,
wait_interval=1,
timeout=10,
**config_kwarg_overrides)
class SimpleModel(model.DetectionModel):
"""A model with a single weight vector."""
def __init__(self, num_classes=1):
super(SimpleModel, self).__init__(num_classes)
self.weight = tf.keras.backend.variable(np.ones(10), name='weight')
def postprocess(self, prediction_dict, true_image_shapes):
return {}
def updates(self):
return []
def restore_map(self, *args, **kwargs):
pass
def restore_from_objects(self, fine_tune_checkpoint_type):
return {'model': self}
def preprocess(self, _):
return tf.zeros((1, 128, 128, 3)), tf.constant([[128, 128, 3]])
def provide_groundtruth(self, *args, **kwargs):
pass
def predict(self, pred_inputs, true_image_shapes):
return {'prediction':
tf.abs(tf.reduce_sum(self.weight) * tf.reduce_sum(pred_inputs))}
def loss(self, prediction_dict, _):
return {'loss': tf.reduce_sum(prediction_dict['prediction'])}
def regularization_losses(self):
return []
def fake_model_builder(*_, **__):
return SimpleModel()
FAKE_BUILDER_MAP = {'detection_model_fn_base': fake_model_builder}
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class ModelCheckpointTest(tf.test.TestCase):
"""Test for model checkpoint related functionality."""
def test_checkpoint_max_to_keep(self):
"""Test that only the most recent checkpoints are kept."""
strategy = tf2.distribute.OneDeviceStrategy(device='/cpu:0')
with mock.patch.dict(
model_lib_v2.MODEL_BUILD_UTIL_MAP, FAKE_BUILDER_MAP):
model_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST)
new_pipeline_config_path = os.path.join(model_dir, 'new_pipeline.config')
config_util.clear_fine_tune_checkpoint(pipeline_config_path,
new_pipeline_config_path)
config_kwarg_overrides = _get_config_kwarg_overrides()
with strategy.scope():
model_lib_v2.train_loop(
new_pipeline_config_path, model_dir=model_dir,
train_steps=5, checkpoint_every_n=2, checkpoint_max_to_keep=3,
num_steps_per_iteration=1, **config_kwarg_overrides
)
ckpt_files = tf.io.gfile.glob(os.path.join(model_dir, 'ckpt-*.index'))
self.assertEqual(len(ckpt_files), 3,
'{} not of length 3.'.format(ckpt_files))
class IncompatibleModel(SimpleModel):
def restore_from_objects(self, *args, **kwargs):
return {'weight': self.weight}
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class CheckpointV2Test(tf.test.TestCase):
def setUp(self):
super(CheckpointV2Test, self).setUp()
self._model = SimpleModel()
tf.keras.backend.set_value(self._model.weight, np.ones(10) * 42)
ckpt = tf.train.Checkpoint(model=self._model)
self._test_dir = tf.test.get_temp_dir()
self._ckpt_path = ckpt.save(os.path.join(self._test_dir, 'ckpt'))
tf.keras.backend.set_value(self._model.weight, np.ones(10))
pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
configs = config_util.merge_external_params_with_configs(
configs, kwargs_dict=_get_config_kwarg_overrides())
self._train_input_fn = inputs.create_train_input_fn(
configs['train_config'],
configs['train_input_config'],
configs['model'])
def test_restore_v2(self):
"""Test that restoring a v2 style checkpoint works."""
model_lib_v2.load_fine_tune_checkpoint(
self._model, self._ckpt_path, checkpoint_type='',
checkpoint_version=train_pb2.CheckpointVersion.V2,
run_model_on_dummy_input=True,
input_dataset=self._train_input_fn(),
unpad_groundtruth_tensors=True)
np.testing.assert_allclose(self._model.weight.numpy(), 42)
def test_restore_map_incompatible_error(self):
"""Test that restoring an incompatible restore map causes an error."""
with self.assertRaisesRegex(TypeError,
r'.*received a \(str -> ResourceVariable\).*'):
model_lib_v2.load_fine_tune_checkpoint(
IncompatibleModel(), self._ckpt_path, checkpoint_type='',
checkpoint_version=train_pb2.CheckpointVersion.V2,
run_model_on_dummy_input=True,
input_dataset=self._train_input_fn(),
unpad_groundtruth_tensors=True)
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class MetricsExportTest(tf.test.TestCase):
@classmethod
def setUpClass(cls): # pylint:disable=g-missing-super-call
tf.keras.backend.clear_session()
def test_export_metrics_json_serializable(self):
"""Tests that Estimator and input function are constructed correctly."""
strategy = tf2.distribute.OneDeviceStrategy(device='/cpu:0')
def export(data, _):
json.dumps(data)
with mock.patch.dict(
exporter_lib_v2.INPUT_BUILDER_UTIL_MAP, FAKE_BUILDER_MAP):
with strategy.scope():
model_dir = tf.test.get_temp_dir()
new_pipeline_config_path = os.path.join(model_dir,
'new_pipeline.config')
pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST)
config_util.clear_fine_tune_checkpoint(pipeline_config_path,
new_pipeline_config_path)
train_steps = 2
with strategy.scope():
model_lib_v2.train_loop(
new_pipeline_config_path,
model_dir=model_dir,
train_steps=train_steps,
checkpoint_every_n=100,
performance_summary_exporter=export,
num_steps_per_iteration=1,
**_get_config_kwarg_overrides())
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/model_lib_tf2_test.py | model_lib_tf2_test.py |
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library to export TFLite-compatible SavedModel from TF2 detection models."""
import os
import numpy as np
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
from object_detection.builders import model_builder
from object_detection.builders import post_processing_builder
from object_detection.core import box_list
from object_detection.core import standard_fields as fields
_DEFAULT_NUM_CHANNELS = 3
_DEFAULT_NUM_COORD_BOX = 4
_MAX_CLASSES_PER_DETECTION = 1
_DETECTION_POSTPROCESS_FUNC = 'TFLite_Detection_PostProcess'
def get_const_center_size_encoded_anchors(anchors):
"""Exports center-size encoded anchors as a constant tensor.
Args:
anchors: a float32 tensor of shape [num_anchors, 4] containing the anchor
boxes
Returns:
    num_anchors: the number of anchors (a Python int).
    encoded_anchors: a float32 constant tensor of shape [num_anchors, 4]
      containing the center-size encoded anchor boxes.
"""
anchor_boxlist = box_list.BoxList(anchors)
y, x, h, w = anchor_boxlist.get_center_coordinates_and_sizes()
num_anchors = y.get_shape().as_list()
with tf1.Session() as sess:
y_out, x_out, h_out, w_out = sess.run([y, x, h, w])
encoded_anchors = tf1.constant(
np.transpose(np.stack((y_out, x_out, h_out, w_out))),
dtype=tf1.float32,
shape=[num_anchors[0], _DEFAULT_NUM_COORD_BOX],
name='anchors')
return num_anchors[0], encoded_anchors
class SSDModule(tf.Module):
"""Inference Module for TFLite-friendly SSD models."""
def __init__(self, pipeline_config, detection_model, max_detections,
use_regular_nms):
"""Initialization.
Args:
pipeline_config: The original pipeline_pb2.TrainEvalPipelineConfig
detection_model: The detection model to use for inference.
max_detections: Max detections desired from the TFLite model.
use_regular_nms: If True, TFLite model uses the (slower) multi-class NMS.
"""
self._process_config(pipeline_config)
self._pipeline_config = pipeline_config
self._model = detection_model
self._max_detections = max_detections
self._use_regular_nms = use_regular_nms
def _process_config(self, pipeline_config):
self._num_classes = pipeline_config.model.ssd.num_classes
self._nms_score_threshold = pipeline_config.model.ssd.post_processing.batch_non_max_suppression.score_threshold
self._nms_iou_threshold = pipeline_config.model.ssd.post_processing.batch_non_max_suppression.iou_threshold
self._scale_values = {}
self._scale_values[
'y_scale'] = pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.y_scale
self._scale_values[
'x_scale'] = pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.x_scale
self._scale_values[
'h_scale'] = pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.height_scale
self._scale_values[
'w_scale'] = pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.width_scale
image_resizer_config = pipeline_config.model.ssd.image_resizer
image_resizer = image_resizer_config.WhichOneof('image_resizer_oneof')
self._num_channels = _DEFAULT_NUM_CHANNELS
if image_resizer == 'fixed_shape_resizer':
self._height = image_resizer_config.fixed_shape_resizer.height
self._width = image_resizer_config.fixed_shape_resizer.width
if image_resizer_config.fixed_shape_resizer.convert_to_grayscale:
self._num_channels = 1
else:
raise ValueError(
          'Only fixed_shape_resizer '
'is supported with tflite. Found {}'.format(
image_resizer_config.WhichOneof('image_resizer_oneof')))
def input_shape(self):
"""Returns shape of TFLite model input."""
return [1, self._height, self._width, self._num_channels]
def postprocess_implements_signature(self):
"""Returns tf.implements signature for MLIR legalization of TFLite NMS."""
implements_signature = [
'name: "%s"' % _DETECTION_POSTPROCESS_FUNC,
'attr { key: "max_detections" value { i: %d } }' % self._max_detections,
'attr { key: "max_classes_per_detection" value { i: %d } }' %
_MAX_CLASSES_PER_DETECTION,
'attr { key: "use_regular_nms" value { b: %s } }' %
str(self._use_regular_nms).lower(),
'attr { key: "nms_score_threshold" value { f: %f } }' %
self._nms_score_threshold,
'attr { key: "nms_iou_threshold" value { f: %f } }' %
self._nms_iou_threshold,
'attr { key: "y_scale" value { f: %f } }' %
self._scale_values['y_scale'],
'attr { key: "x_scale" value { f: %f } }' %
self._scale_values['x_scale'],
'attr { key: "h_scale" value { f: %f } }' %
self._scale_values['h_scale'],
'attr { key: "w_scale" value { f: %f } }' %
self._scale_values['w_scale'],
'attr { key: "num_classes" value { i: %d } }' % self._num_classes
]
implements_signature = ' '.join(implements_signature)
return implements_signature
def _get_postprocess_fn(self, num_anchors, num_classes):
# There is no TF equivalent for TFLite's custom post-processing op.
# So we add an 'empty' composite function here, that is legalized to the
# custom op with MLIR.
@tf.function(
experimental_implements=self.postprocess_implements_signature())
# pylint: disable=g-unused-argument,unused-argument
def dummy_post_processing(box_encodings, class_predictions, anchors):
boxes = tf.constant(0.0, dtype=tf.float32, name='boxes')
scores = tf.constant(0.0, dtype=tf.float32, name='scores')
classes = tf.constant(0.0, dtype=tf.float32, name='classes')
num_detections = tf.constant(0.0, dtype=tf.float32, name='num_detections')
return boxes, classes, scores, num_detections
return dummy_post_processing
@tf.function
def inference_fn(self, image):
"""Encapsulates SSD inference for TFLite conversion.
NOTE: The Args & Returns sections below indicate the TFLite model signature,
and not what the TF graph does (since the latter does not include the custom
NMS op used by TFLite)
Args:
      image: a float32 tensor of shape [1, height, width, num_channels]
        containing the (already preprocessed) input image.
Returns:
num_detections: a float32 scalar denoting number of total detections.
classes: a float32 tensor denoting class ID for each detection.
scores: a float32 tensor denoting score for each detection.
boxes: a float32 tensor denoting coordinates of each detected box.
"""
predicted_tensors = self._model.predict(image, true_image_shapes=None)
# The score conversion occurs before the post-processing custom op
_, score_conversion_fn = post_processing_builder.build(
self._pipeline_config.model.ssd.post_processing)
class_predictions = score_conversion_fn(
predicted_tensors['class_predictions_with_background'])
with tf.name_scope('raw_outputs'):
# 'raw_outputs/box_encodings': a float32 tensor of shape
# [1, num_anchors, 4] containing the encoded box predictions. Note that
# these are raw predictions and no Non-Max suppression is applied on
# them and no decode center size boxes is applied to them.
box_encodings = tf.identity(
predicted_tensors['box_encodings'], name='box_encodings')
# 'raw_outputs/class_predictions': a float32 tensor of shape
# [1, num_anchors, num_classes] containing the class scores for each
# anchor after applying score conversion.
class_predictions = tf.identity(
class_predictions, name='class_predictions')
# 'anchors': a float32 tensor of shape
# [4, num_anchors] containing the anchors as a constant node.
num_anchors, anchors = get_const_center_size_encoded_anchors(
predicted_tensors['anchors'])
anchors = tf.identity(anchors, name='anchors')
    # @tf.function seems to reverse the output order, so reverse it here.
return self._get_postprocess_fn(num_anchors,
self._num_classes)(box_encodings,
class_predictions,
anchors)[::-1]
class CenterNetModule(tf.Module):
"""Inference Module for TFLite-friendly CenterNet models.
  The exported CenterNet model includes the preprocessing and postprocessing
  logic, so the caller should pass in the raw image pixel values. It supports
  both the object detection and keypoint estimation tasks.
"""
def __init__(self, pipeline_config, max_detections, include_keypoints,
label_map_path=''):
"""Initialization.
Args:
pipeline_config: The original pipeline_pb2.TrainEvalPipelineConfig
max_detections: Max detections desired from the TFLite model.
include_keypoints: If set true, the output dictionary will include the
keypoint coordinates and keypoint confidence scores.
label_map_path: Path to the label map which is used by CenterNet keypoint
estimation task. If provided, the label_map_path in the configuration
will be replaced by this one.
"""
self._max_detections = max_detections
self._include_keypoints = include_keypoints
self._process_config(pipeline_config)
if include_keypoints and label_map_path:
pipeline_config.model.center_net.keypoint_label_map_path = label_map_path
self._pipeline_config = pipeline_config
self._model = model_builder.build(
self._pipeline_config.model, is_training=False)
def get_model(self):
return self._model
def _process_config(self, pipeline_config):
self._num_classes = pipeline_config.model.center_net.num_classes
center_net_config = pipeline_config.model.center_net
image_resizer_config = center_net_config.image_resizer
image_resizer = image_resizer_config.WhichOneof('image_resizer_oneof')
self._num_channels = _DEFAULT_NUM_CHANNELS
if image_resizer == 'fixed_shape_resizer':
self._height = image_resizer_config.fixed_shape_resizer.height
self._width = image_resizer_config.fixed_shape_resizer.width
if image_resizer_config.fixed_shape_resizer.convert_to_grayscale:
self._num_channels = 1
else:
raise ValueError(
          'Only fixed_shape_resizer '
'is supported with tflite. Found {}'.format(image_resizer))
center_net_config.object_center_params.max_box_predictions = (
self._max_detections)
if not self._include_keypoints:
del center_net_config.keypoint_estimation_task[:]
def input_shape(self):
"""Returns shape of TFLite model input."""
return [1, self._height, self._width, self._num_channels]
@tf.function
def inference_fn(self, image):
"""Encapsulates CenterNet inference for TFLite conversion.
Args:
image: a float32 tensor of shape [1, image_height, image_width, channel]
denoting the image pixel values.
Returns:
A dictionary of predicted tensors:
classes: a float32 tensor with shape [1, max_detections] denoting class
ID for each detection.
scores: a float32 tensor with shape [1, max_detections] denoting score
for each detection.
boxes: a float32 tensor with shape [1, max_detections, 4] denoting
coordinates of each detected box.
keypoints: a float32 with shape [1, max_detections, num_keypoints, 2]
denoting the predicted keypoint coordinates (normalized in between
0-1). Note that [:, :, :, 0] represents the y coordinates and
[:, :, :, 1] represents the x coordinates.
keypoint_scores: a float32 with shape [1, max_detections, num_keypoints]
denoting keypoint confidence scores.
"""
image = tf.cast(image, tf.float32)
image, shapes = self._model.preprocess(image)
prediction_dict = self._model.predict(image, None)
detections = self._model.postprocess(
prediction_dict, true_image_shapes=shapes)
field_names = fields.DetectionResultFields
classes_field = field_names.detection_classes
classes = tf.cast(detections[classes_field], tf.float32)
num_detections = tf.cast(detections[field_names.num_detections], tf.float32)
if self._include_keypoints:
model_outputs = (detections[field_names.detection_boxes], classes,
detections[field_names.detection_scores], num_detections,
detections[field_names.detection_keypoints],
detections[field_names.detection_keypoint_scores])
else:
model_outputs = (detections[field_names.detection_boxes], classes,
detections[field_names.detection_scores], num_detections)
    # @tf.function seems to reverse the output order, so reverse it here.
return model_outputs[::-1]
def export_tflite_model(pipeline_config, trained_checkpoint_dir,
output_directory, max_detections, use_regular_nms,
include_keypoints=False, label_map_path=''):
"""Exports inference SavedModel for TFLite conversion.
  NOTE: Only supports SSD and CenterNet meta-architectures for now, and the
  output model will have a static-shaped, single-batch input.
This function creates `output_directory` if it does not already exist,
which will hold the intermediate SavedModel that can be used with the TFLite
converter.
Args:
    pipeline_config: pipeline_pb2.TrainEvalPipelineConfig proto.
    trained_checkpoint_dir: Path to the directory containing the trained
      checkpoint.
output_directory: Path to write outputs.
max_detections: Max detections desired from the TFLite model.
use_regular_nms: If True, TFLite model uses the (slower) multi-class NMS.
Note that this argument is only used by the SSD model.
include_keypoints: Decides whether to also output the keypoint predictions.
Note that this argument is only used by the CenterNet model.
label_map_path: Path to the label map which is used by CenterNet keypoint
estimation task. If provided, the label_map_path in the configuration will
be replaced by this one.
Raises:
ValueError: if pipeline is invalid.
"""
output_saved_model_directory = os.path.join(output_directory, 'saved_model')
# Build the underlying model using pipeline config.
# TODO(b/162842801): Add support for other architectures.
if pipeline_config.model.WhichOneof('model') == 'ssd':
detection_model = model_builder.build(
pipeline_config.model, is_training=False)
ckpt = tf.train.Checkpoint(model=detection_model)
# The module helps build a TF SavedModel appropriate for TFLite conversion.
detection_module = SSDModule(pipeline_config, detection_model,
max_detections, use_regular_nms)
elif pipeline_config.model.WhichOneof('model') == 'center_net':
detection_module = CenterNetModule(
pipeline_config, max_detections, include_keypoints,
label_map_path=label_map_path)
ckpt = tf.train.Checkpoint(model=detection_module.get_model())
else:
raise ValueError('Only ssd or center_net models are supported in tflite. '
'Found {} in config'.format(
pipeline_config.model.WhichOneof('model')))
manager = tf.train.CheckpointManager(
ckpt, trained_checkpoint_dir, max_to_keep=1)
status = ckpt.restore(manager.latest_checkpoint).expect_partial()
# Getting the concrete function traces the graph and forces variables to
# be constructed; only after this can we save the saved model.
status.assert_existing_objects_matched()
concrete_function = detection_module.inference_fn.get_concrete_function(
tf.TensorSpec(
shape=detection_module.input_shape(), dtype=tf.float32, name='input'))
status.assert_existing_objects_matched()
# Export SavedModel.
tf.saved_model.save(
detection_module,
output_saved_model_directory,
signatures=concrete_function)
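# --- Hedged usage sketch (not part of the original library) ---
# export_tflite_model() above only writes the intermediate SavedModel under
# `<output_directory>/saved_model`; producing the .tflite flatbuffer is a
# separate step. The sketch below shows that step with the TF2 converter.
# Enabling allow_custom_ops is an assumption that matters mainly for the SSD
# path, whose graph is legalized to the TFLite_Detection_PostProcess custom op.
def example_convert_saved_model_to_tflite(output_directory):
  """Sketch: converts the exported SavedModel to a TFLite flatbuffer."""
  converter = tf.lite.TFLiteConverter.from_saved_model(
      os.path.join(output_directory, 'saved_model'))
  converter.allow_custom_ops = True
  tflite_model = converter.convert()
  with tf.io.gfile.GFile(
      os.path.join(output_directory, 'model.tflite'), 'wb') as f:
    f.write(tflite_model)
  return tflite_model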
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/export_tflite_graph_lib_tf2.py | export_tflite_graph_lib_tf2.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Binary to run train and evaluation on object detection model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
import tensorflow.compat.v1 as tf
from object_detection import model_lib
flags.DEFINE_string(
'model_dir', None, 'Path to output model directory '
'where event and checkpoint files will be written.')
flags.DEFINE_string('pipeline_config_path', None, 'Path to pipeline config '
'file.')
flags.DEFINE_integer('num_train_steps', None, 'Number of train steps.')
flags.DEFINE_boolean('eval_training_data', False,
'If training data should be evaluated for this job. Note '
                     'that one can only use this in eval-only mode, and '
'`checkpoint_dir` must be supplied.')
flags.DEFINE_integer('sample_1_of_n_eval_examples', 1, 'Will sample one of '
'every n eval input examples, where n is provided.')
flags.DEFINE_integer('sample_1_of_n_eval_on_train_examples', 5, 'Will sample '
'one of every n train input examples for evaluation, '
'where n is provided. This is only used if '
'`eval_training_data` is True.')
flags.DEFINE_string(
'checkpoint_dir', None, 'Path to directory holding a checkpoint. If '
'`checkpoint_dir` is provided, this binary operates in eval-only mode, '
'writing resulting metrics to `model_dir`.')
flags.DEFINE_boolean(
'run_once', False, 'If running in eval-only mode, whether to run just '
'one round of eval vs running continuously (default).'
)
flags.DEFINE_integer(
'max_eval_retries', 0, 'If running continuous eval, the maximum number of '
'retries upon encountering tf.errors.InvalidArgumentError. If negative, '
'will always retry the evaluation.'
)
FLAGS = flags.FLAGS
def main(unused_argv):
flags.mark_flag_as_required('model_dir')
flags.mark_flag_as_required('pipeline_config_path')
config = tf.estimator.RunConfig(model_dir=FLAGS.model_dir)
train_and_eval_dict = model_lib.create_estimator_and_inputs(
run_config=config,
pipeline_config_path=FLAGS.pipeline_config_path,
train_steps=FLAGS.num_train_steps,
sample_1_of_n_eval_examples=FLAGS.sample_1_of_n_eval_examples,
sample_1_of_n_eval_on_train_examples=(
FLAGS.sample_1_of_n_eval_on_train_examples))
estimator = train_and_eval_dict['estimator']
train_input_fn = train_and_eval_dict['train_input_fn']
eval_input_fns = train_and_eval_dict['eval_input_fns']
eval_on_train_input_fn = train_and_eval_dict['eval_on_train_input_fn']
predict_input_fn = train_and_eval_dict['predict_input_fn']
train_steps = train_and_eval_dict['train_steps']
if FLAGS.checkpoint_dir:
if FLAGS.eval_training_data:
name = 'training_data'
input_fn = eval_on_train_input_fn
else:
name = 'validation_data'
# The first eval input will be evaluated.
input_fn = eval_input_fns[0]
if FLAGS.run_once:
estimator.evaluate(input_fn,
steps=None,
checkpoint_path=tf.train.latest_checkpoint(
FLAGS.checkpoint_dir))
else:
model_lib.continuous_eval(estimator, FLAGS.checkpoint_dir, input_fn,
train_steps, name, FLAGS.max_eval_retries)
else:
train_spec, eval_specs = model_lib.create_train_and_eval_specs(
train_input_fn,
eval_input_fns,
eval_on_train_input_fn,
predict_input_fn,
train_steps,
eval_on_train_data=False)
# Currently only a single Eval Spec is allowed.
tf.estimator.train_and_evaluate(estimator, train_spec, eval_specs[0])
if __name__ == '__main__':
tf.app.run()
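# Example invocations (hypothetical paths; every flag used is defined above).
#
# Train and evaluate:
#   python object_detection/model_main.py \
#     --pipeline_config_path=path/to/pipeline.config \
#     --model_dir=path/to/model_dir \
#     --num_train_steps=50000
#
# Eval-only mode (evaluates checkpoints written by a separate training job and
# writes the resulting metrics to --model_dir):
#   python object_detection/model_main.py \
#     --pipeline_config_path=path/to/pipeline.config \
#     --model_dir=path/to/eval_dir \
#     --checkpoint_dir=path/to/model_dir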
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/model_main.py | model_main.py |
# Lint as: python2, python3
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.export_inference_graph."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import unittest
import numpy as np
import six
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.tools import strip_unused_lib
from object_detection import exporter
from object_detection.builders import graph_rewriter_builder
from object_detection.builders import model_builder
from object_detection.core import model
from object_detection.protos import graph_rewriter_pb2
from object_detection.protos import pipeline_pb2
from object_detection.utils import ops
from object_detection.utils import tf_version
from object_detection.utils import variables_helper
if six.PY2:
import mock # pylint: disable=g-import-not-at-top
else:
mock = unittest.mock # pylint: disable=g-import-not-at-top, g-importing-member
# pylint: disable=g-import-not-at-top
try:
import tf_slim as slim
except ImportError:
# TF 2.0 doesn't ship with contrib.
pass
# pylint: enable=g-import-not-at-top
class FakeModel(model.DetectionModel):
def __init__(self, add_detection_keypoints=False, add_detection_masks=False,
add_detection_features=False):
self._add_detection_keypoints = add_detection_keypoints
self._add_detection_masks = add_detection_masks
self._add_detection_features = add_detection_features
def preprocess(self, inputs):
true_image_shapes = [] # Doesn't matter for the fake model.
return tf.identity(inputs), true_image_shapes
def predict(self, preprocessed_inputs, true_image_shapes):
return {'image': tf.layers.conv2d(preprocessed_inputs, 3, 1)}
def postprocess(self, prediction_dict, true_image_shapes):
with tf.control_dependencies(list(prediction_dict.values())):
postprocessed_tensors = {
'detection_boxes': tf.constant([[[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.8, 0.8]],
[[0.5, 0.5, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0]]], tf.float32),
'detection_scores': tf.constant([[0.7, 0.6],
[0.9, 0.0]], tf.float32),
'detection_multiclass_scores': tf.constant([[[0.3, 0.7], [0.4, 0.6]],
[[0.1, 0.9], [0.0, 0.0]]],
tf.float32),
'detection_classes': tf.constant([[0, 1],
[1, 0]], tf.float32),
'num_detections': tf.constant([2, 1], tf.float32),
'raw_detection_boxes': tf.constant([[[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.8, 0.8]],
[[0.5, 0.5, 1.0, 1.0],
[0.0, 0.5, 0.0, 0.5]]],
tf.float32),
'raw_detection_scores': tf.constant([[0.7, 0.6],
[0.9, 0.5]], tf.float32),
}
if self._add_detection_keypoints:
postprocessed_tensors['detection_keypoints'] = tf.constant(
np.arange(48).reshape([2, 2, 6, 2]), tf.float32)
if self._add_detection_masks:
postprocessed_tensors['detection_masks'] = tf.constant(
np.arange(64).reshape([2, 2, 4, 4]), tf.float32)
if self._add_detection_features:
# let fake detection features have shape [4, 4, 10]
postprocessed_tensors['detection_features'] = tf.constant(
np.ones((2, 2, 4, 4, 10)), tf.float32)
return postprocessed_tensors
def restore_map(self, checkpoint_path, fine_tune_checkpoint_type):
pass
def restore_from_objects(self, fine_tune_checkpoint_type):
pass
def loss(self, prediction_dict, true_image_shapes):
pass
def regularization_losses(self):
pass
def updates(self):
pass
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class ExportInferenceGraphTest(tf.test.TestCase):
def _save_checkpoint_from_mock_model(self,
checkpoint_path,
use_moving_averages,
enable_quantization=False):
g = tf.Graph()
with g.as_default():
mock_model = FakeModel()
preprocessed_inputs, true_image_shapes = mock_model.preprocess(
tf.placeholder(tf.float32, shape=[None, None, None, 3]))
predictions = mock_model.predict(preprocessed_inputs, true_image_shapes)
mock_model.postprocess(predictions, true_image_shapes)
if use_moving_averages:
tf.train.ExponentialMovingAverage(0.0).apply()
tf.train.get_or_create_global_step()
if enable_quantization:
graph_rewriter_config = graph_rewriter_pb2.GraphRewriter()
graph_rewriter_config.quantization.delay = 500000
graph_rewriter_fn = graph_rewriter_builder.build(
graph_rewriter_config, is_training=False)
graph_rewriter_fn()
saver = tf.train.Saver()
init = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init)
saver.save(sess, checkpoint_path)
def _load_inference_graph(self, inference_graph_path, is_binary=True):
od_graph = tf.Graph()
with od_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(inference_graph_path, mode='rb') as fid:
if is_binary:
od_graph_def.ParseFromString(fid.read())
else:
text_format.Parse(fid.read(), od_graph_def)
tf.import_graph_def(od_graph_def, name='')
return od_graph
def _create_tf_example(self, image_array):
with self.test_session():
encoded_image = tf.image.encode_jpeg(tf.constant(image_array)).eval()
def _bytes_feature(value):
return tf.train.Feature(
bytes_list=tf.train.BytesList(value=[six.ensure_binary(value)]))
example = tf.train.Example(features=tf.train.Features(feature={
'image/encoded': _bytes_feature(encoded_image),
'image/format': _bytes_feature('jpg'),
'image/source_id': _bytes_feature('image_id')
})).SerializeToString()
return example
def test_export_graph_with_image_tensor_input(self):
tmp_dir = self.get_temp_dir()
trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
use_moving_averages=False)
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel()
output_directory = os.path.join(tmp_dir, 'output')
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = False
exporter.export_inference_graph(
input_type='image_tensor',
pipeline_config=pipeline_config,
trained_checkpoint_prefix=trained_checkpoint_prefix,
output_directory=output_directory)
self.assertTrue(os.path.exists(os.path.join(
output_directory, 'saved_model', 'saved_model.pb')))
def test_write_inference_graph(self):
tmp_dir = self.get_temp_dir()
trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
use_moving_averages=False)
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel()
output_directory = os.path.join(tmp_dir, 'output')
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = False
exporter.export_inference_graph(
input_type='image_tensor',
pipeline_config=pipeline_config,
trained_checkpoint_prefix=trained_checkpoint_prefix,
output_directory=output_directory,
write_inference_graph=True)
self.assertTrue(os.path.exists(os.path.join(
output_directory, 'inference_graph.pbtxt')))
def test_export_graph_with_fixed_size_image_tensor_input(self):
input_shape = [1, 320, 320, 3]
tmp_dir = self.get_temp_dir()
trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
self._save_checkpoint_from_mock_model(
trained_checkpoint_prefix, use_moving_averages=False)
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel()
output_directory = os.path.join(tmp_dir, 'output')
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = False
exporter.export_inference_graph(
input_type='image_tensor',
pipeline_config=pipeline_config,
trained_checkpoint_prefix=trained_checkpoint_prefix,
output_directory=output_directory,
input_shape=input_shape)
saved_model_path = os.path.join(output_directory, 'saved_model')
self.assertTrue(
os.path.exists(os.path.join(saved_model_path, 'saved_model.pb')))
with tf.Graph().as_default() as od_graph:
with self.test_session(graph=od_graph) as sess:
meta_graph = tf.saved_model.loader.load(
sess, [tf.saved_model.tag_constants.SERVING], saved_model_path)
signature = meta_graph.signature_def['serving_default']
input_tensor_name = signature.inputs['inputs'].name
image_tensor = od_graph.get_tensor_by_name(input_tensor_name)
self.assertSequenceEqual(image_tensor.get_shape().as_list(),
input_shape)
def test_export_graph_with_tf_example_input(self):
tmp_dir = self.get_temp_dir()
trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
use_moving_averages=False)
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel()
output_directory = os.path.join(tmp_dir, 'output')
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = False
exporter.export_inference_graph(
input_type='tf_example',
pipeline_config=pipeline_config,
trained_checkpoint_prefix=trained_checkpoint_prefix,
output_directory=output_directory)
self.assertTrue(os.path.exists(os.path.join(
output_directory, 'saved_model', 'saved_model.pb')))
def test_export_graph_with_fixed_size_tf_example_input(self):
input_shape = [1, 320, 320, 3]
tmp_dir = self.get_temp_dir()
trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
self._save_checkpoint_from_mock_model(
trained_checkpoint_prefix, use_moving_averages=False)
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel()
output_directory = os.path.join(tmp_dir, 'output')
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = False
exporter.export_inference_graph(
input_type='tf_example',
pipeline_config=pipeline_config,
trained_checkpoint_prefix=trained_checkpoint_prefix,
output_directory=output_directory,
input_shape=input_shape)
saved_model_path = os.path.join(output_directory, 'saved_model')
self.assertTrue(
os.path.exists(os.path.join(saved_model_path, 'saved_model.pb')))
def test_export_graph_with_encoded_image_string_input(self):
tmp_dir = self.get_temp_dir()
trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
use_moving_averages=False)
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel()
output_directory = os.path.join(tmp_dir, 'output')
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = False
exporter.export_inference_graph(
input_type='encoded_image_string_tensor',
pipeline_config=pipeline_config,
trained_checkpoint_prefix=trained_checkpoint_prefix,
output_directory=output_directory)
self.assertTrue(os.path.exists(os.path.join(
output_directory, 'saved_model', 'saved_model.pb')))
def test_export_graph_with_fixed_size_encoded_image_string_input(self):
input_shape = [1, 320, 320, 3]
tmp_dir = self.get_temp_dir()
trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
self._save_checkpoint_from_mock_model(
trained_checkpoint_prefix, use_moving_averages=False)
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel()
output_directory = os.path.join(tmp_dir, 'output')
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = False
exporter.export_inference_graph(
input_type='encoded_image_string_tensor',
pipeline_config=pipeline_config,
trained_checkpoint_prefix=trained_checkpoint_prefix,
output_directory=output_directory,
input_shape=input_shape)
saved_model_path = os.path.join(output_directory, 'saved_model')
self.assertTrue(
os.path.exists(os.path.join(saved_model_path, 'saved_model.pb')))
def _get_variables_in_checkpoint(self, checkpoint_file):
return set([
var_name
for var_name, _ in tf.train.list_variables(checkpoint_file)])
def test_replace_variable_values_with_moving_averages(self):
tmp_dir = self.get_temp_dir()
trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
new_checkpoint_prefix = os.path.join(tmp_dir, 'new.ckpt')
self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
use_moving_averages=True)
graph = tf.Graph()
with graph.as_default():
fake_model = FakeModel()
preprocessed_inputs, true_image_shapes = fake_model.preprocess(
tf.placeholder(dtype=tf.float32, shape=[None, None, None, 3]))
predictions = fake_model.predict(preprocessed_inputs, true_image_shapes)
fake_model.postprocess(predictions, true_image_shapes)
exporter.replace_variable_values_with_moving_averages(
graph, trained_checkpoint_prefix, new_checkpoint_prefix)
expected_variables = set(['conv2d/bias', 'conv2d/kernel'])
variables_in_old_ckpt = self._get_variables_in_checkpoint(
trained_checkpoint_prefix)
self.assertIn('conv2d/bias/ExponentialMovingAverage',
variables_in_old_ckpt)
self.assertIn('conv2d/kernel/ExponentialMovingAverage',
variables_in_old_ckpt)
variables_in_new_ckpt = self._get_variables_in_checkpoint(
new_checkpoint_prefix)
self.assertTrue(expected_variables.issubset(variables_in_new_ckpt))
self.assertNotIn('conv2d/bias/ExponentialMovingAverage',
variables_in_new_ckpt)
self.assertNotIn('conv2d/kernel/ExponentialMovingAverage',
variables_in_new_ckpt)
def test_export_graph_with_moving_averages(self):
tmp_dir = self.get_temp_dir()
trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
use_moving_averages=True)
output_directory = os.path.join(tmp_dir, 'output')
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel()
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = True
exporter.export_inference_graph(
input_type='image_tensor',
pipeline_config=pipeline_config,
trained_checkpoint_prefix=trained_checkpoint_prefix,
output_directory=output_directory)
self.assertTrue(os.path.exists(os.path.join(
output_directory, 'saved_model', 'saved_model.pb')))
expected_variables = set(['conv2d/bias', 'conv2d/kernel', 'global_step'])
actual_variables = set(
[var_name for var_name, _ in tf.train.list_variables(output_directory)])
self.assertTrue(expected_variables.issubset(actual_variables))
def test_export_model_with_quantization_nodes(self):
tmp_dir = self.get_temp_dir()
trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
self._save_checkpoint_from_mock_model(
trained_checkpoint_prefix,
use_moving_averages=False,
enable_quantization=True)
output_directory = os.path.join(tmp_dir, 'output')
inference_graph_path = os.path.join(output_directory,
'inference_graph.pbtxt')
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel()
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
text_format.Merge(
"""graph_rewriter {
quantization {
delay: 50000
activation_bits: 8
weight_bits: 8
}
}""", pipeline_config)
exporter.export_inference_graph(
input_type='image_tensor',
pipeline_config=pipeline_config,
trained_checkpoint_prefix=trained_checkpoint_prefix,
output_directory=output_directory,
write_inference_graph=True)
self._load_inference_graph(inference_graph_path, is_binary=False)
has_quant_nodes = False
for v in variables_helper.get_global_variables_safely():
if six.ensure_str(v.op.name).endswith('act_quant/min'):
has_quant_nodes = True
break
self.assertTrue(has_quant_nodes)
def test_export_model_with_all_output_nodes(self):
tmp_dir = self.get_temp_dir()
trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
use_moving_averages=True)
output_directory = os.path.join(tmp_dir, 'output')
inference_graph_path = os.path.join(output_directory,
'frozen_inference_graph.pb')
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel(
add_detection_keypoints=True, add_detection_masks=True,
add_detection_features=True)
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
exporter.export_inference_graph(
input_type='image_tensor',
pipeline_config=pipeline_config,
trained_checkpoint_prefix=trained_checkpoint_prefix,
output_directory=output_directory)
inference_graph = self._load_inference_graph(inference_graph_path)
with self.test_session(graph=inference_graph):
inference_graph.get_tensor_by_name('image_tensor:0')
inference_graph.get_tensor_by_name('detection_boxes:0')
inference_graph.get_tensor_by_name('detection_scores:0')
inference_graph.get_tensor_by_name('detection_multiclass_scores:0')
inference_graph.get_tensor_by_name('detection_classes:0')
inference_graph.get_tensor_by_name('detection_keypoints:0')
inference_graph.get_tensor_by_name('detection_masks:0')
inference_graph.get_tensor_by_name('num_detections:0')
inference_graph.get_tensor_by_name('detection_features:0')
def test_export_model_with_detection_only_nodes(self):
tmp_dir = self.get_temp_dir()
trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
use_moving_averages=True)
output_directory = os.path.join(tmp_dir, 'output')
inference_graph_path = os.path.join(output_directory,
'frozen_inference_graph.pb')
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel(add_detection_masks=False)
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
exporter.export_inference_graph(
input_type='image_tensor',
pipeline_config=pipeline_config,
trained_checkpoint_prefix=trained_checkpoint_prefix,
output_directory=output_directory)
inference_graph = self._load_inference_graph(inference_graph_path)
with self.test_session(graph=inference_graph):
inference_graph.get_tensor_by_name('image_tensor:0')
inference_graph.get_tensor_by_name('detection_boxes:0')
inference_graph.get_tensor_by_name('detection_scores:0')
inference_graph.get_tensor_by_name('detection_multiclass_scores:0')
inference_graph.get_tensor_by_name('detection_classes:0')
inference_graph.get_tensor_by_name('num_detections:0')
with self.assertRaises(KeyError):
inference_graph.get_tensor_by_name('detection_keypoints:0')
inference_graph.get_tensor_by_name('detection_masks:0')
def test_export_model_with_detection_only_nodes_and_detection_features(self):
tmp_dir = self.get_temp_dir()
trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
use_moving_averages=True)
output_directory = os.path.join(tmp_dir, 'output')
inference_graph_path = os.path.join(output_directory,
'frozen_inference_graph.pb')
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel(add_detection_features=True)
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
exporter.export_inference_graph(
input_type='image_tensor',
pipeline_config=pipeline_config,
trained_checkpoint_prefix=trained_checkpoint_prefix,
output_directory=output_directory)
inference_graph = self._load_inference_graph(inference_graph_path)
with self.test_session(graph=inference_graph):
inference_graph.get_tensor_by_name('image_tensor:0')
inference_graph.get_tensor_by_name('detection_boxes:0')
inference_graph.get_tensor_by_name('detection_scores:0')
inference_graph.get_tensor_by_name('detection_multiclass_scores:0')
inference_graph.get_tensor_by_name('detection_classes:0')
inference_graph.get_tensor_by_name('num_detections:0')
inference_graph.get_tensor_by_name('detection_features:0')
with self.assertRaises(KeyError):
inference_graph.get_tensor_by_name('detection_keypoints:0')
inference_graph.get_tensor_by_name('detection_masks:0')
def test_export_and_run_inference_with_image_tensor(self):
tmp_dir = self.get_temp_dir()
trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
use_moving_averages=True)
output_directory = os.path.join(tmp_dir, 'output')
inference_graph_path = os.path.join(output_directory,
'frozen_inference_graph.pb')
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel(
add_detection_keypoints=True, add_detection_masks=True)
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = False
exporter.export_inference_graph(
input_type='image_tensor',
pipeline_config=pipeline_config,
trained_checkpoint_prefix=trained_checkpoint_prefix,
output_directory=output_directory)
inference_graph = self._load_inference_graph(inference_graph_path)
with self.test_session(graph=inference_graph) as sess:
image_tensor = inference_graph.get_tensor_by_name('image_tensor:0')
boxes = inference_graph.get_tensor_by_name('detection_boxes:0')
scores = inference_graph.get_tensor_by_name('detection_scores:0')
classes = inference_graph.get_tensor_by_name('detection_classes:0')
keypoints = inference_graph.get_tensor_by_name('detection_keypoints:0')
masks = inference_graph.get_tensor_by_name('detection_masks:0')
num_detections = inference_graph.get_tensor_by_name('num_detections:0')
(boxes_np, scores_np, classes_np, keypoints_np, masks_np,
num_detections_np) = sess.run(
[boxes, scores, classes, keypoints, masks, num_detections],
feed_dict={image_tensor: np.ones((2, 4, 4, 3)).astype(np.uint8)})
self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.8, 0.8]],
[[0.5, 0.5, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0]]])
self.assertAllClose(scores_np, [[0.7, 0.6],
[0.9, 0.0]])
self.assertAllClose(classes_np, [[1, 2],
[2, 1]])
self.assertAllClose(keypoints_np, np.arange(48).reshape([2, 2, 6, 2]))
self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4]))
self.assertAllClose(num_detections_np, [2, 1])
def _create_encoded_image_string(self, image_array_np, encoding_format):
od_graph = tf.Graph()
with od_graph.as_default():
if encoding_format == 'jpg':
encoded_string = tf.image.encode_jpeg(image_array_np)
elif encoding_format == 'png':
encoded_string = tf.image.encode_png(image_array_np)
else:
raise ValueError('Supports only the following formats: `jpg`, `png`')
with self.test_session(graph=od_graph):
return encoded_string.eval()
def test_export_and_run_inference_with_encoded_image_string_tensor(self):
tmp_dir = self.get_temp_dir()
trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
use_moving_averages=True)
output_directory = os.path.join(tmp_dir, 'output')
inference_graph_path = os.path.join(output_directory,
'frozen_inference_graph.pb')
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel(
add_detection_keypoints=True, add_detection_masks=True)
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = False
exporter.export_inference_graph(
input_type='encoded_image_string_tensor',
pipeline_config=pipeline_config,
trained_checkpoint_prefix=trained_checkpoint_prefix,
output_directory=output_directory)
inference_graph = self._load_inference_graph(inference_graph_path)
jpg_image_str = self._create_encoded_image_string(
np.ones((4, 4, 3)).astype(np.uint8), 'jpg')
png_image_str = self._create_encoded_image_string(
np.ones((4, 4, 3)).astype(np.uint8), 'png')
with self.test_session(graph=inference_graph) as sess:
image_str_tensor = inference_graph.get_tensor_by_name(
'encoded_image_string_tensor:0')
boxes = inference_graph.get_tensor_by_name('detection_boxes:0')
scores = inference_graph.get_tensor_by_name('detection_scores:0')
multiclass_scores = inference_graph.get_tensor_by_name(
'detection_multiclass_scores:0')
classes = inference_graph.get_tensor_by_name('detection_classes:0')
keypoints = inference_graph.get_tensor_by_name('detection_keypoints:0')
masks = inference_graph.get_tensor_by_name('detection_masks:0')
num_detections = inference_graph.get_tensor_by_name('num_detections:0')
for image_str in [jpg_image_str, png_image_str]:
image_str_batch_np = np.hstack([image_str]* 2)
(boxes_np, scores_np, multiclass_scores_np, classes_np, keypoints_np,
masks_np, num_detections_np) = sess.run(
[
boxes, scores, multiclass_scores, classes, keypoints, masks,
num_detections
],
feed_dict={image_str_tensor: image_str_batch_np})
self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.8, 0.8]],
[[0.5, 0.5, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0]]])
self.assertAllClose(scores_np, [[0.7, 0.6],
[0.9, 0.0]])
self.assertAllClose(multiclass_scores_np, [[[0.3, 0.7], [0.4, 0.6]],
[[0.1, 0.9], [0.0, 0.0]]])
self.assertAllClose(classes_np, [[1, 2],
[2, 1]])
self.assertAllClose(keypoints_np, np.arange(48).reshape([2, 2, 6, 2]))
self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4]))
self.assertAllClose(num_detections_np, [2, 1])
def test_raise_runtime_error_on_images_with_different_sizes(self):
tmp_dir = self.get_temp_dir()
trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
use_moving_averages=True)
output_directory = os.path.join(tmp_dir, 'output')
inference_graph_path = os.path.join(output_directory,
'frozen_inference_graph.pb')
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel(
add_detection_keypoints=True, add_detection_masks=True)
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = False
exporter.export_inference_graph(
input_type='encoded_image_string_tensor',
pipeline_config=pipeline_config,
trained_checkpoint_prefix=trained_checkpoint_prefix,
output_directory=output_directory)
inference_graph = self._load_inference_graph(inference_graph_path)
large_image = self._create_encoded_image_string(
np.ones((4, 4, 3)).astype(np.uint8), 'jpg')
small_image = self._create_encoded_image_string(
np.ones((2, 2, 3)).astype(np.uint8), 'jpg')
image_str_batch_np = np.hstack([large_image, small_image])
with self.test_session(graph=inference_graph) as sess:
image_str_tensor = inference_graph.get_tensor_by_name(
'encoded_image_string_tensor:0')
boxes = inference_graph.get_tensor_by_name('detection_boxes:0')
scores = inference_graph.get_tensor_by_name('detection_scores:0')
classes = inference_graph.get_tensor_by_name('detection_classes:0')
keypoints = inference_graph.get_tensor_by_name('detection_keypoints:0')
masks = inference_graph.get_tensor_by_name('detection_masks:0')
num_detections = inference_graph.get_tensor_by_name('num_detections:0')
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
'TensorArray.*shape'):
sess.run(
[boxes, scores, classes, keypoints, masks, num_detections],
feed_dict={image_str_tensor: image_str_batch_np})
def test_export_and_run_inference_with_tf_example(self):
tmp_dir = self.get_temp_dir()
trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
use_moving_averages=True)
output_directory = os.path.join(tmp_dir, 'output')
inference_graph_path = os.path.join(output_directory,
'frozen_inference_graph.pb')
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel(
add_detection_keypoints=True, add_detection_masks=True)
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = False
exporter.export_inference_graph(
input_type='tf_example',
pipeline_config=pipeline_config,
trained_checkpoint_prefix=trained_checkpoint_prefix,
output_directory=output_directory)
inference_graph = self._load_inference_graph(inference_graph_path)
tf_example_np = np.expand_dims(self._create_tf_example(
np.ones((4, 4, 3)).astype(np.uint8)), axis=0)
with self.test_session(graph=inference_graph) as sess:
tf_example = inference_graph.get_tensor_by_name('tf_example:0')
boxes = inference_graph.get_tensor_by_name('detection_boxes:0')
scores = inference_graph.get_tensor_by_name('detection_scores:0')
classes = inference_graph.get_tensor_by_name('detection_classes:0')
keypoints = inference_graph.get_tensor_by_name('detection_keypoints:0')
masks = inference_graph.get_tensor_by_name('detection_masks:0')
num_detections = inference_graph.get_tensor_by_name('num_detections:0')
(boxes_np, scores_np, classes_np, keypoints_np, masks_np,
num_detections_np) = sess.run(
[boxes, scores, classes, keypoints, masks, num_detections],
feed_dict={tf_example: tf_example_np})
self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.8, 0.8]],
[[0.5, 0.5, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0]]])
self.assertAllClose(scores_np, [[0.7, 0.6],
[0.9, 0.0]])
self.assertAllClose(classes_np, [[1, 2],
[2, 1]])
self.assertAllClose(keypoints_np, np.arange(48).reshape([2, 2, 6, 2]))
self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4]))
self.assertAllClose(num_detections_np, [2, 1])
def test_write_frozen_graph(self):
tmp_dir = self.get_temp_dir()
trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
use_moving_averages=True)
output_directory = os.path.join(tmp_dir, 'output')
inference_graph_path = os.path.join(output_directory,
'frozen_inference_graph.pb')
tf.gfile.MakeDirs(output_directory)
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel(
add_detection_keypoints=True, add_detection_masks=True)
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = False
detection_model = model_builder.build(pipeline_config.model,
is_training=False)
outputs, _ = exporter.build_detection_graph(
input_type='tf_example',
detection_model=detection_model,
input_shape=None,
output_collection_name='inference_op',
graph_hook_fn=None)
output_node_names = ','.join(list(outputs.keys()))
saver = tf.train.Saver()
input_saver_def = saver.as_saver_def()
exporter.freeze_graph_with_def_protos(
input_graph_def=tf.get_default_graph().as_graph_def(),
input_saver_def=input_saver_def,
input_checkpoint=trained_checkpoint_prefix,
output_node_names=output_node_names,
restore_op_name='save/restore_all',
filename_tensor_name='save/Const:0',
output_graph=inference_graph_path,
clear_devices=True,
initializer_nodes='')
inference_graph = self._load_inference_graph(inference_graph_path)
tf_example_np = np.expand_dims(self._create_tf_example(
np.ones((4, 4, 3)).astype(np.uint8)), axis=0)
with self.test_session(graph=inference_graph) as sess:
tf_example = inference_graph.get_tensor_by_name('tf_example:0')
boxes = inference_graph.get_tensor_by_name('detection_boxes:0')
scores = inference_graph.get_tensor_by_name('detection_scores:0')
classes = inference_graph.get_tensor_by_name('detection_classes:0')
keypoints = inference_graph.get_tensor_by_name('detection_keypoints:0')
masks = inference_graph.get_tensor_by_name('detection_masks:0')
num_detections = inference_graph.get_tensor_by_name('num_detections:0')
(boxes_np, scores_np, classes_np, keypoints_np, masks_np,
num_detections_np) = sess.run(
[boxes, scores, classes, keypoints, masks, num_detections],
feed_dict={tf_example: tf_example_np})
self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.8, 0.8]],
[[0.5, 0.5, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0]]])
self.assertAllClose(scores_np, [[0.7, 0.6],
[0.9, 0.0]])
self.assertAllClose(classes_np, [[1, 2],
[2, 1]])
self.assertAllClose(keypoints_np, np.arange(48).reshape([2, 2, 6, 2]))
self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4]))
self.assertAllClose(num_detections_np, [2, 1])
def test_export_graph_saves_pipeline_file(self):
tmp_dir = self.get_temp_dir()
trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
use_moving_averages=True)
output_directory = os.path.join(tmp_dir, 'output')
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel()
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
exporter.export_inference_graph(
input_type='image_tensor',
pipeline_config=pipeline_config,
trained_checkpoint_prefix=trained_checkpoint_prefix,
output_directory=output_directory)
expected_pipeline_path = os.path.join(
output_directory, 'pipeline.config')
self.assertTrue(os.path.exists(expected_pipeline_path))
written_pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
with tf.gfile.GFile(expected_pipeline_path, 'r') as f:
proto_str = f.read()
text_format.Merge(proto_str, written_pipeline_config)
self.assertProtoEquals(pipeline_config, written_pipeline_config)
def test_export_saved_model_and_run_inference(self):
tmp_dir = self.get_temp_dir()
trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
use_moving_averages=False)
output_directory = os.path.join(tmp_dir, 'output')
saved_model_path = os.path.join(output_directory, 'saved_model')
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel(
add_detection_keypoints=True, add_detection_masks=True)
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = False
exporter.export_inference_graph(
input_type='tf_example',
pipeline_config=pipeline_config,
trained_checkpoint_prefix=trained_checkpoint_prefix,
output_directory=output_directory)
tf_example_np = np.hstack([self._create_tf_example(
np.ones((4, 4, 3)).astype(np.uint8))] * 2)
with tf.Graph().as_default() as od_graph:
with self.test_session(graph=od_graph) as sess:
meta_graph = tf.saved_model.loader.load(
sess, [tf.saved_model.tag_constants.SERVING], saved_model_path)
signature = meta_graph.signature_def['serving_default']
input_tensor_name = signature.inputs['inputs'].name
tf_example = od_graph.get_tensor_by_name(input_tensor_name)
boxes = od_graph.get_tensor_by_name(
signature.outputs['detection_boxes'].name)
scores = od_graph.get_tensor_by_name(
signature.outputs['detection_scores'].name)
multiclass_scores = od_graph.get_tensor_by_name(
signature.outputs['detection_multiclass_scores'].name)
classes = od_graph.get_tensor_by_name(
signature.outputs['detection_classes'].name)
keypoints = od_graph.get_tensor_by_name(
signature.outputs['detection_keypoints'].name)
masks = od_graph.get_tensor_by_name(
signature.outputs['detection_masks'].name)
num_detections = od_graph.get_tensor_by_name(
signature.outputs['num_detections'].name)
(boxes_np, scores_np, multiclass_scores_np, classes_np, keypoints_np,
masks_np, num_detections_np) = sess.run(
[boxes, scores, multiclass_scores, classes, keypoints, masks,
num_detections],
feed_dict={tf_example: tf_example_np})
self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.8, 0.8]],
[[0.5, 0.5, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0]]])
self.assertAllClose(scores_np, [[0.7, 0.6],
[0.9, 0.0]])
self.assertAllClose(multiclass_scores_np, [[[0.3, 0.7], [0.4, 0.6]],
[[0.1, 0.9], [0.0, 0.0]]])
self.assertAllClose(classes_np, [[1, 2],
[2, 1]])
self.assertAllClose(keypoints_np, np.arange(48).reshape([2, 2, 6, 2]))
self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4]))
self.assertAllClose(num_detections_np, [2, 1])
def test_write_saved_model(self):
tmp_dir = self.get_temp_dir()
trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
use_moving_averages=False)
output_directory = os.path.join(tmp_dir, 'output')
saved_model_path = os.path.join(output_directory, 'saved_model')
tf.gfile.MakeDirs(output_directory)
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel(
add_detection_keypoints=True, add_detection_masks=True)
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = False
detection_model = model_builder.build(pipeline_config.model,
is_training=False)
outputs, placeholder_tensor = exporter.build_detection_graph(
input_type='tf_example',
detection_model=detection_model,
input_shape=None,
output_collection_name='inference_op',
graph_hook_fn=None)
output_node_names = ','.join(list(outputs.keys()))
saver = tf.train.Saver()
input_saver_def = saver.as_saver_def()
frozen_graph_def = exporter.freeze_graph_with_def_protos(
input_graph_def=tf.get_default_graph().as_graph_def(),
input_saver_def=input_saver_def,
input_checkpoint=trained_checkpoint_prefix,
output_node_names=output_node_names,
restore_op_name='save/restore_all',
filename_tensor_name='save/Const:0',
output_graph='',
clear_devices=True,
initializer_nodes='')
exporter.write_saved_model(
saved_model_path=saved_model_path,
frozen_graph_def=frozen_graph_def,
inputs=placeholder_tensor,
outputs=outputs)
tf_example_np = np.hstack([self._create_tf_example(
np.ones((4, 4, 3)).astype(np.uint8))] * 2)
with tf.Graph().as_default() as od_graph:
with self.test_session(graph=od_graph) as sess:
meta_graph = tf.saved_model.loader.load(
sess, [tf.saved_model.tag_constants.SERVING], saved_model_path)
signature = meta_graph.signature_def['serving_default']
input_tensor_name = signature.inputs['inputs'].name
tf_example = od_graph.get_tensor_by_name(input_tensor_name)
boxes = od_graph.get_tensor_by_name(
signature.outputs['detection_boxes'].name)
scores = od_graph.get_tensor_by_name(
signature.outputs['detection_scores'].name)
classes = od_graph.get_tensor_by_name(
signature.outputs['detection_classes'].name)
keypoints = od_graph.get_tensor_by_name(
signature.outputs['detection_keypoints'].name)
masks = od_graph.get_tensor_by_name(
signature.outputs['detection_masks'].name)
num_detections = od_graph.get_tensor_by_name(
signature.outputs['num_detections'].name)
(boxes_np, scores_np, classes_np, keypoints_np, masks_np,
num_detections_np) = sess.run(
[boxes, scores, classes, keypoints, masks, num_detections],
feed_dict={tf_example: tf_example_np})
self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.8, 0.8]],
[[0.5, 0.5, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0]]])
self.assertAllClose(scores_np, [[0.7, 0.6],
[0.9, 0.0]])
self.assertAllClose(classes_np, [[1, 2],
[2, 1]])
self.assertAllClose(keypoints_np, np.arange(48).reshape([2, 2, 6, 2]))
self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4]))
self.assertAllClose(num_detections_np, [2, 1])
def test_export_checkpoint_and_run_inference(self):
tmp_dir = self.get_temp_dir()
trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
use_moving_averages=False)
output_directory = os.path.join(tmp_dir, 'output')
model_path = os.path.join(output_directory, 'model.ckpt')
meta_graph_path = model_path + '.meta'
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel(
add_detection_keypoints=True, add_detection_masks=True)
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = False
exporter.export_inference_graph(
input_type='tf_example',
pipeline_config=pipeline_config,
trained_checkpoint_prefix=trained_checkpoint_prefix,
output_directory=output_directory)
tf_example_np = np.hstack([self._create_tf_example(
np.ones((4, 4, 3)).astype(np.uint8))] * 2)
with tf.Graph().as_default() as od_graph:
with self.test_session(graph=od_graph) as sess:
new_saver = tf.train.import_meta_graph(meta_graph_path)
new_saver.restore(sess, model_path)
tf_example = od_graph.get_tensor_by_name('tf_example:0')
boxes = od_graph.get_tensor_by_name('detection_boxes:0')
scores = od_graph.get_tensor_by_name('detection_scores:0')
classes = od_graph.get_tensor_by_name('detection_classes:0')
keypoints = od_graph.get_tensor_by_name('detection_keypoints:0')
masks = od_graph.get_tensor_by_name('detection_masks:0')
num_detections = od_graph.get_tensor_by_name('num_detections:0')
(boxes_np, scores_np, classes_np, keypoints_np, masks_np,
num_detections_np) = sess.run(
[boxes, scores, classes, keypoints, masks, num_detections],
feed_dict={tf_example: tf_example_np})
self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.8, 0.8]],
[[0.5, 0.5, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0]]])
self.assertAllClose(scores_np, [[0.7, 0.6],
[0.9, 0.0]])
self.assertAllClose(classes_np, [[1, 2],
[2, 1]])
self.assertAllClose(keypoints_np, np.arange(48).reshape([2, 2, 6, 2]))
self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4]))
self.assertAllClose(num_detections_np, [2, 1])
def test_write_graph_and_checkpoint(self):
tmp_dir = self.get_temp_dir()
trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt')
self._save_checkpoint_from_mock_model(trained_checkpoint_prefix,
use_moving_averages=False)
output_directory = os.path.join(tmp_dir, 'output')
model_path = os.path.join(output_directory, 'model.ckpt')
meta_graph_path = model_path + '.meta'
tf.gfile.MakeDirs(output_directory)
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel(
add_detection_keypoints=True, add_detection_masks=True)
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = False
detection_model = model_builder.build(pipeline_config.model,
is_training=False)
exporter.build_detection_graph(
input_type='tf_example',
detection_model=detection_model,
input_shape=None,
output_collection_name='inference_op',
graph_hook_fn=None)
saver = tf.train.Saver()
input_saver_def = saver.as_saver_def()
exporter.write_graph_and_checkpoint(
inference_graph_def=tf.get_default_graph().as_graph_def(),
model_path=model_path,
input_saver_def=input_saver_def,
trained_checkpoint_prefix=trained_checkpoint_prefix)
tf_example_np = np.hstack([self._create_tf_example(
np.ones((4, 4, 3)).astype(np.uint8))] * 2)
with tf.Graph().as_default() as od_graph:
with self.test_session(graph=od_graph) as sess:
new_saver = tf.train.import_meta_graph(meta_graph_path)
new_saver.restore(sess, model_path)
tf_example = od_graph.get_tensor_by_name('tf_example:0')
boxes = od_graph.get_tensor_by_name('detection_boxes:0')
scores = od_graph.get_tensor_by_name('detection_scores:0')
raw_boxes = od_graph.get_tensor_by_name('raw_detection_boxes:0')
raw_scores = od_graph.get_tensor_by_name('raw_detection_scores:0')
classes = od_graph.get_tensor_by_name('detection_classes:0')
keypoints = od_graph.get_tensor_by_name('detection_keypoints:0')
masks = od_graph.get_tensor_by_name('detection_masks:0')
num_detections = od_graph.get_tensor_by_name('num_detections:0')
(boxes_np, scores_np, raw_boxes_np, raw_scores_np, classes_np,
keypoints_np, masks_np, num_detections_np) = sess.run(
[boxes, scores, raw_boxes, raw_scores, classes, keypoints, masks,
num_detections],
feed_dict={tf_example: tf_example_np})
self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.8, 0.8]],
[[0.5, 0.5, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0]]])
self.assertAllClose(scores_np, [[0.7, 0.6],
[0.9, 0.0]])
self.assertAllClose(raw_boxes_np, [[[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.8, 0.8]],
[[0.5, 0.5, 1.0, 1.0],
[0.0, 0.5, 0.0, 0.5]]])
self.assertAllClose(raw_scores_np, [[0.7, 0.6],
[0.9, 0.5]])
self.assertAllClose(classes_np, [[1, 2],
[2, 1]])
self.assertAllClose(keypoints_np, np.arange(48).reshape([2, 2, 6, 2]))
self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4]))
self.assertAllClose(num_detections_np, [2, 1])
def test_rewrite_nn_resize_op(self):
g = tf.Graph()
with g.as_default():
x = array_ops.placeholder(dtypes.float32, shape=(8, 10, 10, 8))
y = array_ops.placeholder(dtypes.float32, shape=(8, 20, 20, 8))
s = ops.nearest_neighbor_upsampling(x, 2)
t = s + y
exporter.rewrite_nn_resize_op()
resize_op_found = False
for op in g.get_operations():
if op.type == 'ResizeNearestNeighbor':
resize_op_found = True
self.assertEqual(op.inputs[0], x)
self.assertEqual(op.outputs[0].consumers()[0], t.op)
break
self.assertTrue(resize_op_found)
def test_rewrite_nn_resize_op_quantized(self):
g = tf.Graph()
with g.as_default():
x = array_ops.placeholder(dtypes.float32, shape=(8, 10, 10, 8))
x_conv = slim.conv2d(x, 8, 1)
y = array_ops.placeholder(dtypes.float32, shape=(8, 20, 20, 8))
s = ops.nearest_neighbor_upsampling(x_conv, 2)
t = s + y
graph_rewriter_config = graph_rewriter_pb2.GraphRewriter()
graph_rewriter_config.quantization.delay = 500000
graph_rewriter_fn = graph_rewriter_builder.build(
graph_rewriter_config, is_training=False)
graph_rewriter_fn()
exporter.rewrite_nn_resize_op(is_quantized=True)
resize_op_found = False
for op in g.get_operations():
if op.type == 'ResizeNearestNeighbor':
resize_op_found = True
self.assertEqual(op.inputs[0].op.type, 'FakeQuantWithMinMaxVars')
self.assertEqual(op.outputs[0].consumers()[0], t.op)
break
self.assertTrue(resize_op_found)
def test_rewrite_nn_resize_op_odd_size(self):
g = tf.Graph()
with g.as_default():
x = array_ops.placeholder(dtypes.float32, shape=(8, 10, 10, 8))
s = ops.nearest_neighbor_upsampling(x, 2)
t = s[:, :19, :19, :]
exporter.rewrite_nn_resize_op()
resize_op_found = False
for op in g.get_operations():
if op.type == 'ResizeNearestNeighbor':
resize_op_found = True
self.assertEqual(op.inputs[0], x)
self.assertEqual(op.outputs[0].consumers()[0], t.op)
break
self.assertTrue(resize_op_found)
def test_rewrite_nn_resize_op_quantized_odd_size(self):
g = tf.Graph()
with g.as_default():
x = array_ops.placeholder(dtypes.float32, shape=(8, 10, 10, 8))
x_conv = slim.conv2d(x, 8, 1)
s = ops.nearest_neighbor_upsampling(x_conv, 2)
t = s[:, :19, :19, :]
graph_rewriter_config = graph_rewriter_pb2.GraphRewriter()
graph_rewriter_config.quantization.delay = 500000
graph_rewriter_fn = graph_rewriter_builder.build(
graph_rewriter_config, is_training=False)
graph_rewriter_fn()
exporter.rewrite_nn_resize_op(is_quantized=True)
resize_op_found = False
for op in g.get_operations():
if op.type == 'ResizeNearestNeighbor':
resize_op_found = True
self.assertEqual(op.inputs[0].op.type, 'FakeQuantWithMinMaxVars')
self.assertEqual(op.outputs[0].consumers()[0], t.op)
break
self.assertTrue(resize_op_found)
def test_rewrite_nn_resize_op_multiple_path(self):
g = tf.Graph()
with g.as_default():
with tf.name_scope('nearest_upsampling'):
x = array_ops.placeholder(dtypes.float32, shape=(8, 10, 10, 8))
x_stack = tf.stack([tf.stack([x] * 2, axis=3)] * 2, axis=2)
x_reshape = tf.reshape(x_stack, [8, 20, 20, 8])
with tf.name_scope('nearest_upsampling'):
x_2 = array_ops.placeholder(dtypes.float32, shape=(8, 10, 10, 8))
x_stack_2 = tf.stack([tf.stack([x_2] * 2, axis=3)] * 2, axis=2)
x_reshape_2 = tf.reshape(x_stack_2, [8, 20, 20, 8])
t = x_reshape + x_reshape_2
exporter.rewrite_nn_resize_op()
graph_def = g.as_graph_def()
graph_def = strip_unused_lib.strip_unused(
graph_def,
input_node_names=[
'nearest_upsampling/Placeholder', 'nearest_upsampling_1/Placeholder'
],
output_node_names=['add'],
placeholder_type_enum=dtypes.float32.as_datatype_enum)
counter_resize_op = 0
t_input_ops = [op.name for op in t.op.inputs]
for node in graph_def.node:
# Make sure Stacks are replaced.
self.assertNotEqual(node.op, 'Pack')
if node.op == 'ResizeNearestNeighbor':
counter_resize_op += 1
self.assertIn(six.ensure_str(node.name) + ':0', t_input_ops)
self.assertEqual(counter_resize_op, 2)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/exporter_tf1_test.py | exporter_tf1_test.py |
# Lint as: python2, python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to export object detection inference graph."""
import ast
import os
import tensorflow.compat.v2 as tf
from object_detection.builders import model_builder
from object_detection.core import standard_fields as fields
from object_detection.data_decoders import tf_example_decoder
from object_detection.utils import config_util
INPUT_BUILDER_UTIL_MAP = {
'model_build': model_builder.build,
}
def _decode_image(encoded_image_string_tensor):
  """Decodes an encoded image string tensor into a 3-channel image tensor."""
image_tensor = tf.image.decode_image(encoded_image_string_tensor,
channels=3)
image_tensor.set_shape((None, None, 3))
return image_tensor
def _decode_tf_example(tf_example_string_tensor):
  """Decodes the image tensor from a serialized tf.Example string."""
tensor_dict = tf_example_decoder.TfExampleDecoder().decode(
tf_example_string_tensor)
image_tensor = tensor_dict[fields.InputDataFields.image]
return image_tensor
def _combine_side_inputs(side_input_shapes='',
side_input_types='',
side_input_names=''):
"""Zips the side inputs together.
Args:
side_input_shapes: forward-slash-separated list of comma-separated lists
describing input shapes.
side_input_types: comma-separated list of the types of the inputs.
side_input_names: comma-separated list of the names of the inputs.
Returns:
a zipped list of side input tuples.
"""
side_input_shapes = [
ast.literal_eval('[' + x + ']') for x in side_input_shapes.split('/')
]
side_input_types = eval('[' + side_input_types + ']') # pylint: disable=eval-used
side_input_names = side_input_names.split(',')
return zip(side_input_shapes, side_input_types, side_input_names)
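# Illustrative example (not part of the library API; the concrete values are
# made up): with side_input_shapes='1,2,2/1',
# side_input_types='tf.float32,tf.uint8' and
# side_input_names='embedding,valid_mask', the call
#   list(_combine_side_inputs('1,2,2/1', 'tf.float32,tf.uint8',
#                             'embedding,valid_mask'))
# evaluates to
#   [([1, 2, 2], tf.float32, 'embedding'), ([1], tf.uint8, 'valid_mask')].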
class DetectionInferenceModule(tf.Module):
"""Detection Inference Module."""
def __init__(self, detection_model,
use_side_inputs=False,
zipped_side_inputs=None):
"""Initializes a module for detection.
Args:
detection_model: the detection model to use for inference.
use_side_inputs: whether to use side inputs.
zipped_side_inputs: the zipped side inputs.
"""
self._model = detection_model
def _get_side_input_signature(self, zipped_side_inputs):
sig = []
side_input_names = []
for info in zipped_side_inputs:
sig.append(tf.TensorSpec(shape=info[0],
dtype=info[1],
name=info[2]))
side_input_names.append(info[2])
return sig
def _get_side_names_from_zip(self, zipped_side_inputs):
return [side[2] for side in zipped_side_inputs]
def _preprocess_input(self, batch_input, decode_fn):
    # Input preprocessing happens on the CPU. We do not need to specify device
    # placement explicitly, as it is handled automatically by TF.
def _decode_and_preprocess(single_input):
image = decode_fn(single_input)
image = tf.cast(image, tf.float32)
image, true_shape = self._model.preprocess(image[tf.newaxis, :, :, :])
return image[0], true_shape[0]
images, true_shapes = tf.map_fn(
_decode_and_preprocess,
elems=batch_input,
parallel_iterations=32,
back_prop=False,
fn_output_signature=(tf.float32, tf.int32))
return images, true_shapes
def _run_inference_on_images(self, images, true_shapes, **kwargs):
"""Cast image to float and run inference.
Args:
images: float32 Tensor of shape [None, None, None, 3].
      true_shapes: int32 Tensor of shape [batch, 3] holding true image shapes.
**kwargs: additional keyword arguments.
Returns:
Tensor dictionary holding detections.
"""
label_id_offset = 1
prediction_dict = self._model.predict(images, true_shapes, **kwargs)
detections = self._model.postprocess(prediction_dict, true_shapes)
classes_field = fields.DetectionResultFields.detection_classes
detections[classes_field] = (
tf.cast(detections[classes_field], tf.float32) + label_id_offset)
for key, val in detections.items():
detections[key] = tf.cast(val, tf.float32)
return detections
class DetectionFromImageModule(DetectionInferenceModule):
"""Detection Inference Module for image inputs."""
def __init__(self, detection_model,
use_side_inputs=False,
zipped_side_inputs=None):
"""Initializes a module for detection.
Args:
detection_model: the detection model to use for inference.
use_side_inputs: whether to use side inputs.
zipped_side_inputs: the zipped side inputs.
"""
if zipped_side_inputs is None:
zipped_side_inputs = []
sig = [tf.TensorSpec(shape=[1, None, None, 3],
dtype=tf.uint8,
name='input_tensor')]
if use_side_inputs:
sig.extend(self._get_side_input_signature(zipped_side_inputs))
self._side_input_names = self._get_side_names_from_zip(zipped_side_inputs)
def call_func(input_tensor, *side_inputs):
kwargs = dict(zip(self._side_input_names, side_inputs))
images, true_shapes = self._preprocess_input(input_tensor, lambda x: x)
return self._run_inference_on_images(images, true_shapes, **kwargs)
self.__call__ = tf.function(call_func, input_signature=sig)
# TODO(kaushikshiv): Check if omitting the signature also works.
super(DetectionFromImageModule, self).__init__(detection_model,
use_side_inputs,
zipped_side_inputs)
def get_true_shapes(input_tensor):
  """Returns a [batch, 3] tensor that repeats the image shape per example."""
input_shape = tf.shape(input_tensor)
batch = input_shape[0]
image_shape = input_shape[1:]
true_shapes = tf.tile(image_shape[tf.newaxis, :], [batch, 1])
return true_shapes
class DetectionFromFloatImageModule(DetectionInferenceModule):
"""Detection Inference Module for float image inputs."""
@tf.function(
input_signature=[
tf.TensorSpec(shape=[None, None, None, 3], dtype=tf.float32)])
def __call__(self, input_tensor):
images, true_shapes = self._preprocess_input(input_tensor, lambda x: x)
return self._run_inference_on_images(images,
true_shapes)
class DetectionFromEncodedImageModule(DetectionInferenceModule):
"""Detection Inference Module for encoded image string inputs."""
@tf.function(input_signature=[tf.TensorSpec(shape=[None], dtype=tf.string)])
def __call__(self, input_tensor):
images, true_shapes = self._preprocess_input(input_tensor, _decode_image)
return self._run_inference_on_images(images, true_shapes)
class DetectionFromTFExampleModule(DetectionInferenceModule):
"""Detection Inference Module for TF.Example inputs."""
@tf.function(input_signature=[tf.TensorSpec(shape=[None], dtype=tf.string)])
def __call__(self, input_tensor):
images, true_shapes = self._preprocess_input(input_tensor,
_decode_tf_example)
return self._run_inference_on_images(images, true_shapes)
def export_inference_graph(input_type,
pipeline_config,
trained_checkpoint_dir,
output_directory,
use_side_inputs=False,
side_input_shapes='',
side_input_types='',
side_input_names=''):
"""Exports inference graph for the model specified in the pipeline config.
This function creates `output_directory` if it does not already exist,
which will hold a copy of the pipeline config with filename `pipeline.config`,
and two subdirectories named `checkpoint` and `saved_model`
(containing the exported checkpoint and SavedModel respectively).
Args:
input_type: Type of input for the graph. Can be one of ['image_tensor',
'encoded_image_string_tensor', 'tf_example'].
    pipeline_config: pipeline_pb2.TrainEvalPipelineConfig proto.
    trained_checkpoint_dir: Path to the directory containing the trained
      checkpoint.
output_directory: Path to write outputs.
use_side_inputs: boolean that determines whether side inputs should be
included in the input signature.
side_input_shapes: forward-slash-separated list of comma-separated lists
describing input shapes.
side_input_types: comma-separated list of the types of the inputs.
side_input_names: comma-separated list of the names of the inputs.
Raises:
ValueError: if input_type is invalid.
"""
output_checkpoint_directory = os.path.join(output_directory, 'checkpoint')
output_saved_model_directory = os.path.join(output_directory, 'saved_model')
detection_model = INPUT_BUILDER_UTIL_MAP['model_build'](
pipeline_config.model, is_training=False)
ckpt = tf.train.Checkpoint(
model=detection_model)
manager = tf.train.CheckpointManager(
ckpt, trained_checkpoint_dir, max_to_keep=1)
status = ckpt.restore(manager.latest_checkpoint).expect_partial()
if input_type not in DETECTION_MODULE_MAP:
raise ValueError('Unrecognized `input_type`')
if use_side_inputs and input_type != 'image_tensor':
raise ValueError('Side inputs supported for image_tensor input type only.')
zipped_side_inputs = []
if use_side_inputs:
zipped_side_inputs = _combine_side_inputs(side_input_shapes,
side_input_types,
side_input_names)
detection_module = DETECTION_MODULE_MAP[input_type](detection_model,
use_side_inputs,
list(zipped_side_inputs))
# Getting the concrete function traces the graph and forces variables to
# be constructed --- only after this can we save the checkpoint and
# saved model.
concrete_function = detection_module.__call__.get_concrete_function()
status.assert_existing_objects_matched()
exported_checkpoint_manager = tf.train.CheckpointManager(
ckpt, output_checkpoint_directory, max_to_keep=1)
exported_checkpoint_manager.save(checkpoint_number=0)
tf.saved_model.save(detection_module,
output_saved_model_directory,
signatures=concrete_function)
config_util.save_pipeline_config(pipeline_config, output_directory)
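# Minimal usage sketch (illustrative only): the config path, checkpoint
# directory and output directory below are assumptions, and the two protobuf
# imports are not pulled in by this module itself.
#
#   from google.protobuf import text_format
#   from object_detection.protos import pipeline_pb2
#
#   pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
#   with tf.io.gfile.GFile('/tmp/pipeline.config', 'r') as f:
#     text_format.Merge(f.read(), pipeline_config)
#   export_inference_graph(
#       input_type='image_tensor',
#       pipeline_config=pipeline_config,
#       trained_checkpoint_dir='/tmp/train_dir',
#       output_directory='/tmp/exported_model')
#
# After the call, '/tmp/exported_model' holds pipeline.config plus the
# 'checkpoint' and 'saved_model' subdirectories described in the docstring.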
class DetectionFromImageAndBoxModule(DetectionInferenceModule):
"""Detection Inference Module for image with bounding box inputs.
The saved model will require two inputs (image and normalized boxes) and run
per-box mask prediction. To be compatible with this exporter, the detection
  model has to implement a method called predict_masks_from_boxes(
prediction_dict, true_image_shapes, provided_boxes, **params), where
    - prediction_dict is a dict returned by the predict method.
- true_image_shapes is a tensor of size [batch_size, 3], containing the
true shape of each image in case it is padded.
- provided_boxes is a [batch_size, num_boxes, 4] size tensor containing
boxes specified in normalized coordinates.
"""
def __init__(self,
detection_model,
use_side_inputs=False,
zipped_side_inputs=None):
"""Initializes a module for detection.
Args:
detection_model: the detection model to use for inference.
use_side_inputs: whether to use side inputs.
zipped_side_inputs: the zipped side inputs.
"""
assert hasattr(detection_model, 'predict_masks_from_boxes')
super(DetectionFromImageAndBoxModule,
self).__init__(detection_model, use_side_inputs, zipped_side_inputs)
def _run_segmentation_on_images(self, image, boxes, **kwargs):
"""Run segmentation on images with provided boxes.
Args:
image: uint8 Tensor of shape [1, None, None, 3].
boxes: float32 tensor of shape [1, None, 4] containing normalized box
coordinates.
**kwargs: additional keyword arguments.
Returns:
Tensor dictionary holding detections (including masks).
"""
label_id_offset = 1
image = tf.cast(image, tf.float32)
image, shapes = self._model.preprocess(image)
prediction_dict = self._model.predict(image, shapes, **kwargs)
detections = self._model.predict_masks_from_boxes(prediction_dict, shapes,
boxes)
classes_field = fields.DetectionResultFields.detection_classes
detections[classes_field] = (
tf.cast(detections[classes_field], tf.float32) + label_id_offset)
for key, val in detections.items():
detections[key] = tf.cast(val, tf.float32)
return detections
@tf.function(input_signature=[
tf.TensorSpec(shape=[1, None, None, 3], dtype=tf.uint8),
tf.TensorSpec(shape=[1, None, 4], dtype=tf.float32)
])
def __call__(self, input_tensor, boxes):
return self._run_segmentation_on_images(input_tensor, boxes)
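# Illustrative call sketch (assumes a detection model that implements
# predict_masks_from_boxes and was exported with
# input_type='image_and_boxes_tensor'; the paths and shapes are made up):
#
#   module = tf.saved_model.load('/tmp/exported_model/saved_model')
#   image = tf.zeros([1, 320, 320, 3], dtype=tf.uint8)
#   boxes = tf.constant([[[0.1, 0.1, 0.5, 0.5]]], dtype=tf.float32)
#   detections = module(image, boxes)
#   # `detections` is a dict of float32 tensors, including per-box masks.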
DETECTION_MODULE_MAP = {
'image_tensor': DetectionFromImageModule,
'encoded_image_string_tensor':
DetectionFromEncodedImageModule,
'tf_example': DetectionFromTFExampleModule,
'float_image_tensor': DetectionFromFloatImageModule,
'image_and_boxes_tensor': DetectionFromImageAndBoxModule,
}
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/exporter_lib_v2.py | exporter_lib_v2.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.tflearn.inputs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import unittest
from absl import logging
from absl.testing import parameterized
import numpy as np
import six
import tensorflow.compat.v1 as tf
from object_detection import inputs
from object_detection.core import preprocessor
from object_detection.core import standard_fields as fields
from object_detection.utils import config_util
from object_detection.utils import test_case
from object_detection.utils import test_utils
from object_detection.utils import tf_version
if six.PY2:
import mock # pylint: disable=g-import-not-at-top
else:
from unittest import mock # pylint: disable=g-import-not-at-top, g-importing-member
FLAGS = tf.flags.FLAGS
def _get_configs_for_model(model_name):
"""Returns configurations for model."""
fname = os.path.join(tf.resource_loader.get_data_files_path(),
'samples/configs/' + model_name + '.config')
label_map_path = os.path.join(tf.resource_loader.get_data_files_path(),
'data/pet_label_map.pbtxt')
data_path = os.path.join(tf.resource_loader.get_data_files_path(),
'test_data/pets_examples.record')
configs = config_util.get_configs_from_pipeline_file(fname)
override_dict = {
'train_input_path': data_path,
'eval_input_path': data_path,
'label_map_path': label_map_path
}
return config_util.merge_external_params_with_configs(
configs, kwargs_dict=override_dict)
def _get_configs_for_model_sequence_example(model_name, frame_index=-1):
"""Returns configurations for model."""
fname = os.path.join(tf.resource_loader.get_data_files_path(),
'test_data/' + model_name + '.config')
label_map_path = os.path.join(tf.resource_loader.get_data_files_path(),
'data/snapshot_serengeti_label_map.pbtxt')
data_path = os.path.join(
tf.resource_loader.get_data_files_path(),
'test_data/snapshot_serengeti_sequence_examples.record')
configs = config_util.get_configs_from_pipeline_file(fname)
override_dict = {
'train_input_path': data_path,
'eval_input_path': data_path,
'label_map_path': label_map_path,
'frame_index': frame_index
}
return config_util.merge_external_params_with_configs(
configs, kwargs_dict=override_dict)
def _make_initializable_iterator(dataset):
"""Creates an iterator, and initializes tables.
Args:
dataset: A `tf.data.Dataset` object.
Returns:
A `tf.data.Iterator`.
"""
iterator = tf.data.make_initializable_iterator(dataset)
tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, iterator.initializer)
return iterator
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only tests under TF2.X.')
class InputFnTest(test_case.TestCase, parameterized.TestCase):
def test_faster_rcnn_resnet50_train_input(self):
"""Tests the training input function for FasterRcnnResnet50."""
configs = _get_configs_for_model('faster_rcnn_resnet50_pets')
model_config = configs['model']
model_config.faster_rcnn.num_classes = 37
train_input_fn = inputs.create_train_input_fn(
configs['train_config'], configs['train_input_config'], model_config)
features, labels = _make_initializable_iterator(train_input_fn()).get_next()
self.assertAllEqual([1, None, None, 3],
features[fields.InputDataFields.image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual([1],
features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[1, 100, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[1, 100, model_config.faster_rcnn.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[1, 100],
labels[fields.InputDataFields.groundtruth_weights].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_weights].dtype)
self.assertAllEqual(
[1, 100, model_config.faster_rcnn.num_classes],
labels[fields.InputDataFields.groundtruth_confidences].shape.as_list())
self.assertEqual(
tf.float32,
labels[fields.InputDataFields.groundtruth_confidences].dtype)
def test_faster_rcnn_resnet50_train_input_with_additional_channels(self):
"""Tests the training input function for FasterRcnnResnet50."""
configs = _get_configs_for_model('faster_rcnn_resnet50_pets')
model_config = configs['model']
configs['train_input_config'].num_additional_channels = 2
configs['train_config'].retain_original_images = True
model_config.faster_rcnn.num_classes = 37
train_input_fn = inputs.create_train_input_fn(
configs['train_config'], configs['train_input_config'], model_config)
features, labels = _make_initializable_iterator(train_input_fn()).get_next()
self.assertAllEqual([1, None, None, 5],
features[fields.InputDataFields.image].shape.as_list())
self.assertAllEqual(
[1, None, None, 3],
features[fields.InputDataFields.original_image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual([1],
features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[1, 100, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[1, 100, model_config.faster_rcnn.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[1, 100],
labels[fields.InputDataFields.groundtruth_weights].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_weights].dtype)
self.assertAllEqual(
[1, 100, model_config.faster_rcnn.num_classes],
labels[fields.InputDataFields.groundtruth_confidences].shape.as_list())
self.assertEqual(
tf.float32,
labels[fields.InputDataFields.groundtruth_confidences].dtype)
@parameterized.parameters(
{'eval_batch_size': 1},
{'eval_batch_size': 8}
)
def test_faster_rcnn_resnet50_eval_input(self, eval_batch_size=1):
"""Tests the eval input function for FasterRcnnResnet50."""
configs = _get_configs_for_model('faster_rcnn_resnet50_pets')
model_config = configs['model']
model_config.faster_rcnn.num_classes = 37
eval_config = configs['eval_config']
eval_config.batch_size = eval_batch_size
eval_input_fn = inputs.create_eval_input_fn(
eval_config, configs['eval_input_configs'][0], model_config)
features, labels = _make_initializable_iterator(eval_input_fn()).get_next()
self.assertAllEqual([eval_batch_size, None, None, 3],
features[fields.InputDataFields.image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual(
[eval_batch_size, None, None, 3],
features[fields.InputDataFields.original_image].shape.as_list())
self.assertEqual(tf.uint8,
features[fields.InputDataFields.original_image].dtype)
self.assertAllEqual([eval_batch_size],
features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[eval_batch_size, 100, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[eval_batch_size, 100, model_config.faster_rcnn.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_weights].shape.as_list())
self.assertEqual(
tf.float32,
labels[fields.InputDataFields.groundtruth_weights].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_area].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_area].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_is_crowd].shape.as_list())
self.assertEqual(
tf.bool, labels[fields.InputDataFields.groundtruth_is_crowd].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_difficult].shape.as_list())
self.assertEqual(
tf.int32, labels[fields.InputDataFields.groundtruth_difficult].dtype)
def test_context_rcnn_resnet50_train_input_with_sequence_example(
self, train_batch_size=8):
"""Tests the training input function for FasterRcnnResnet50."""
configs = _get_configs_for_model_sequence_example(
'context_rcnn_camera_trap')
model_config = configs['model']
train_config = configs['train_config']
train_config.batch_size = train_batch_size
train_input_fn = inputs.create_train_input_fn(
train_config, configs['train_input_config'], model_config)
features, labels = _make_initializable_iterator(train_input_fn()).get_next()
self.assertAllEqual([train_batch_size, 640, 640, 3],
features[fields.InputDataFields.image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual([train_batch_size],
features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[train_batch_size, 100, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[train_batch_size, 100, model_config.faster_rcnn.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[train_batch_size, 100],
labels[fields.InputDataFields.groundtruth_weights].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_weights].dtype)
self.assertAllEqual(
[train_batch_size, 100, model_config.faster_rcnn.num_classes],
labels[fields.InputDataFields.groundtruth_confidences].shape.as_list())
self.assertEqual(
tf.float32,
labels[fields.InputDataFields.groundtruth_confidences].dtype)
def test_context_rcnn_resnet50_eval_input_with_sequence_example(
self, eval_batch_size=8):
"""Tests the eval input function for FasterRcnnResnet50."""
configs = _get_configs_for_model_sequence_example(
'context_rcnn_camera_trap')
model_config = configs['model']
eval_config = configs['eval_config']
eval_config.batch_size = eval_batch_size
eval_input_fn = inputs.create_eval_input_fn(
eval_config, configs['eval_input_configs'][0], model_config)
features, labels = _make_initializable_iterator(eval_input_fn()).get_next()
self.assertAllEqual([eval_batch_size, 640, 640, 3],
features[fields.InputDataFields.image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual(
[eval_batch_size, 640, 640, 3],
features[fields.InputDataFields.original_image].shape.as_list())
self.assertEqual(tf.uint8,
features[fields.InputDataFields.original_image].dtype)
self.assertAllEqual([eval_batch_size],
features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[eval_batch_size, 100, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[eval_batch_size, 100, model_config.faster_rcnn.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_weights].shape.as_list())
self.assertEqual(
tf.float32,
labels[fields.InputDataFields.groundtruth_weights].dtype)
def test_context_rcnn_resnet50_eval_input_with_sequence_example_image_id_list(
self, eval_batch_size=8):
"""Tests the eval input function for FasterRcnnResnet50."""
configs = _get_configs_for_model_sequence_example(
'context_rcnn_camera_trap')
model_config = configs['model']
eval_config = configs['eval_config']
eval_config.batch_size = eval_batch_size
eval_input_config = configs['eval_input_configs'][0]
eval_input_config.load_context_image_ids = True
eval_input_fn = inputs.create_eval_input_fn(
eval_config, eval_input_config, model_config)
features, labels = _make_initializable_iterator(eval_input_fn()).get_next()
self.assertAllEqual([eval_batch_size, 640, 640, 3],
features[fields.InputDataFields.image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual(
[eval_batch_size, 640, 640, 3],
features[fields.InputDataFields.original_image].shape.as_list())
self.assertEqual(tf.uint8,
features[fields.InputDataFields.original_image].dtype)
self.assertAllEqual([eval_batch_size],
features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[eval_batch_size, 100, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[eval_batch_size, 100, model_config.faster_rcnn.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_weights].shape.as_list())
self.assertEqual(
tf.float32,
labels[fields.InputDataFields.groundtruth_weights].dtype)
def test_context_rcnn_resnet50_train_input_with_sequence_example_frame_index(
self, train_batch_size=8):
"""Tests the training input function for FasterRcnnResnet50."""
configs = _get_configs_for_model_sequence_example(
'context_rcnn_camera_trap', frame_index=2)
model_config = configs['model']
train_config = configs['train_config']
train_config.batch_size = train_batch_size
train_input_fn = inputs.create_train_input_fn(
train_config, configs['train_input_config'], model_config)
features, labels = _make_initializable_iterator(train_input_fn()).get_next()
self.assertAllEqual([train_batch_size, 640, 640, 3],
features[fields.InputDataFields.image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual([train_batch_size],
features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[train_batch_size, 100, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[train_batch_size, 100, model_config.faster_rcnn.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[train_batch_size, 100],
labels[fields.InputDataFields.groundtruth_weights].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_weights].dtype)
self.assertAllEqual(
[train_batch_size, 100, model_config.faster_rcnn.num_classes],
labels[fields.InputDataFields.groundtruth_confidences].shape.as_list())
self.assertEqual(
tf.float32,
labels[fields.InputDataFields.groundtruth_confidences].dtype)
def test_ssd_inceptionV2_train_input(self):
"""Tests the training input function for SSDInceptionV2."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
model_config = configs['model']
model_config.ssd.num_classes = 37
batch_size = configs['train_config'].batch_size
train_input_fn = inputs.create_train_input_fn(
configs['train_config'], configs['train_input_config'], model_config)
features, labels = _make_initializable_iterator(train_input_fn()).get_next()
self.assertAllEqual([batch_size, 300, 300, 3],
features[fields.InputDataFields.image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual([batch_size],
features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[batch_size],
labels[fields.InputDataFields.num_groundtruth_boxes].shape.as_list())
self.assertEqual(tf.int32,
labels[fields.InputDataFields.num_groundtruth_boxes].dtype)
self.assertAllEqual(
[batch_size, 100, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[batch_size, 100, model_config.ssd.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[batch_size, 100],
labels[
fields.InputDataFields.groundtruth_weights].shape.as_list())
self.assertEqual(
tf.float32,
labels[fields.InputDataFields.groundtruth_weights].dtype)
@parameterized.parameters(
{'eval_batch_size': 1},
{'eval_batch_size': 8}
)
def test_ssd_inceptionV2_eval_input(self, eval_batch_size=1):
"""Tests the eval input function for SSDInceptionV2."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
model_config = configs['model']
model_config.ssd.num_classes = 37
eval_config = configs['eval_config']
eval_config.batch_size = eval_batch_size
eval_input_fn = inputs.create_eval_input_fn(
eval_config, configs['eval_input_configs'][0], model_config)
features, labels = _make_initializable_iterator(eval_input_fn()).get_next()
self.assertAllEqual([eval_batch_size, 300, 300, 3],
features[fields.InputDataFields.image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual(
[eval_batch_size, 300, 300, 3],
features[fields.InputDataFields.original_image].shape.as_list())
self.assertEqual(tf.uint8,
features[fields.InputDataFields.original_image].dtype)
self.assertAllEqual([eval_batch_size],
features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[eval_batch_size, 100, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[eval_batch_size, 100, model_config.ssd.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[
fields.InputDataFields.groundtruth_weights].shape.as_list())
self.assertEqual(
tf.float32,
labels[fields.InputDataFields.groundtruth_weights].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_area].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_area].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_is_crowd].shape.as_list())
self.assertEqual(
tf.bool, labels[fields.InputDataFields.groundtruth_is_crowd].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_difficult].shape.as_list())
self.assertEqual(
tf.int32, labels[fields.InputDataFields.groundtruth_difficult].dtype)
def test_ssd_inceptionV2_eval_input_with_additional_channels(
self, eval_batch_size=1):
"""Tests the eval input function for SSDInceptionV2 with additional channel.
Args:
eval_batch_size: Batch size for eval set.
"""
configs = _get_configs_for_model('ssd_inception_v2_pets')
model_config = configs['model']
model_config.ssd.num_classes = 37
configs['eval_input_configs'][0].num_additional_channels = 1
eval_config = configs['eval_config']
eval_config.batch_size = eval_batch_size
eval_config.retain_original_image_additional_channels = True
eval_input_fn = inputs.create_eval_input_fn(
eval_config, configs['eval_input_configs'][0], model_config)
features, labels = _make_initializable_iterator(eval_input_fn()).get_next()
self.assertAllEqual([eval_batch_size, 300, 300, 4],
features[fields.InputDataFields.image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual(
[eval_batch_size, 300, 300, 3],
features[fields.InputDataFields.original_image].shape.as_list())
self.assertEqual(tf.uint8,
features[fields.InputDataFields.original_image].dtype)
self.assertAllEqual([eval_batch_size, 300, 300, 1], features[
fields.InputDataFields.image_additional_channels].shape.as_list())
self.assertEqual(
tf.uint8,
features[fields.InputDataFields.image_additional_channels].dtype)
self.assertAllEqual([eval_batch_size],
features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[eval_batch_size, 100, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[eval_batch_size, 100, model_config.ssd.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_weights].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_weights].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_area].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_area].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_is_crowd].shape.as_list())
self.assertEqual(tf.bool,
labels[fields.InputDataFields.groundtruth_is_crowd].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_difficult].shape.as_list())
self.assertEqual(tf.int32,
labels[fields.InputDataFields.groundtruth_difficult].dtype)
def test_predict_input(self):
"""Tests the predict input function."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
predict_input_fn = inputs.create_predict_input_fn(
model_config=configs['model'],
predict_input_config=configs['eval_input_configs'][0])
serving_input_receiver = predict_input_fn()
image = serving_input_receiver.features[fields.InputDataFields.image]
receiver_tensors = serving_input_receiver.receiver_tensors[
inputs.SERVING_FED_EXAMPLE_KEY]
self.assertEqual([1, 300, 300, 3], image.shape.as_list())
self.assertEqual(tf.float32, image.dtype)
self.assertEqual(tf.string, receiver_tensors.dtype)
def test_predict_input_with_additional_channels(self):
"""Tests the predict input function with additional channels."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['eval_input_configs'][0].num_additional_channels = 2
predict_input_fn = inputs.create_predict_input_fn(
model_config=configs['model'],
predict_input_config=configs['eval_input_configs'][0])
serving_input_receiver = predict_input_fn()
image = serving_input_receiver.features[fields.InputDataFields.image]
receiver_tensors = serving_input_receiver.receiver_tensors[
inputs.SERVING_FED_EXAMPLE_KEY]
# RGB + 2 additional channels = 5 channels.
self.assertEqual([1, 300, 300, 5], image.shape.as_list())
self.assertEqual(tf.float32, image.dtype)
self.assertEqual(tf.string, receiver_tensors.dtype)
def test_error_with_bad_train_config(self):
"""Tests that a TypeError is raised with improper train config."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['model'].ssd.num_classes = 37
train_input_fn = inputs.create_train_input_fn(
train_config=configs['eval_config'], # Expecting `TrainConfig`.
train_input_config=configs['train_input_config'],
model_config=configs['model'])
with self.assertRaises(TypeError):
train_input_fn()
def test_error_with_bad_train_input_config(self):
"""Tests that a TypeError is raised with improper train input config."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['model'].ssd.num_classes = 37
train_input_fn = inputs.create_train_input_fn(
train_config=configs['train_config'],
train_input_config=configs['model'], # Expecting `InputReader`.
model_config=configs['model'])
with self.assertRaises(TypeError):
train_input_fn()
def test_error_with_bad_train_model_config(self):
"""Tests that a TypeError is raised with improper train model config."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['model'].ssd.num_classes = 37
train_input_fn = inputs.create_train_input_fn(
train_config=configs['train_config'],
train_input_config=configs['train_input_config'],
model_config=configs['train_config']) # Expecting `DetectionModel`.
with self.assertRaises(TypeError):
train_input_fn()
def test_error_with_bad_eval_config(self):
"""Tests that a TypeError is raised with improper eval config."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['model'].ssd.num_classes = 37
eval_input_fn = inputs.create_eval_input_fn(
eval_config=configs['train_config'], # Expecting `EvalConfig`.
eval_input_config=configs['eval_input_configs'][0],
model_config=configs['model'])
with self.assertRaises(TypeError):
eval_input_fn()
def test_error_with_bad_eval_input_config(self):
"""Tests that a TypeError is raised with improper eval input config."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['model'].ssd.num_classes = 37
eval_input_fn = inputs.create_eval_input_fn(
eval_config=configs['eval_config'],
eval_input_config=configs['model'], # Expecting `InputReader`.
model_config=configs['model'])
with self.assertRaises(TypeError):
eval_input_fn()
def test_error_with_bad_eval_model_config(self):
"""Tests that a TypeError is raised with improper eval model config."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['model'].ssd.num_classes = 37
eval_input_fn = inputs.create_eval_input_fn(
eval_config=configs['eval_config'],
eval_input_config=configs['eval_input_configs'][0],
model_config=configs['eval_config']) # Expecting `DetectionModel`.
with self.assertRaises(TypeError):
eval_input_fn()
def test_output_equal_in_replace_empty_string_with_random_number(self):
string_placeholder = tf.placeholder(tf.string, shape=[])
replaced_string = inputs._replace_empty_string_with_random_number(
string_placeholder)
test_string = b'hello world'
feed_dict = {string_placeholder: test_string}
with self.test_session() as sess:
out_string = sess.run(replaced_string, feed_dict=feed_dict)
self.assertEqual(test_string, out_string)
def test_output_is_integer_in_replace_empty_string_with_random_number(self):
string_placeholder = tf.placeholder(tf.string, shape=[])
replaced_string = inputs._replace_empty_string_with_random_number(
string_placeholder)
empty_string = ''
feed_dict = {string_placeholder: empty_string}
with self.test_session() as sess:
out_string = sess.run(replaced_string, feed_dict=feed_dict)
is_integer = True
try:
# Test whether out_string is a string which represents an integer, the
# casting below will throw an error if out_string is not castable to int.
int(out_string)
except ValueError:
is_integer = False
self.assertTrue(is_integer)
def test_force_no_resize(self):
"""Tests the functionality of force_no_reisze option."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['eval_config'].force_no_resize = True
eval_input_fn = inputs.create_eval_input_fn(
eval_config=configs['eval_config'],
eval_input_config=configs['eval_input_configs'][0],
model_config=configs['model']
)
train_input_fn = inputs.create_train_input_fn(
train_config=configs['train_config'],
train_input_config=configs['train_input_config'],
model_config=configs['model']
)
features_train, _ = _make_initializable_iterator(
train_input_fn()).get_next()
features_eval, _ = _make_initializable_iterator(
eval_input_fn()).get_next()
images_train, images_eval = features_train['image'], features_eval['image']
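    # force_no_resize is set only on the eval config, so the eval images keep
    # dynamic (None) spatial dimensions while the train images are still
    # resized to the fixed 300x300 shape (train batch size 24, eval batch
    # size 1).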
self.assertEqual([1, None, None, 3], images_eval.shape.as_list())
self.assertEqual([24, 300, 300, 3], images_train.shape.as_list())
class DataAugmentationFnTest(test_case.TestCase):
def test_apply_image_and_box_augmentation(self):
data_augmentation_options = [
(preprocessor.resize_image, {
'new_height': 20,
'new_width': 20,
'method': tf.image.ResizeMethod.NEAREST_NEIGHBOR
}),
(preprocessor.scale_boxes_to_pixel_coordinates, {}),
]
data_augmentation_fn = functools.partial(
inputs.augment_input_data,
data_augmentation_options=data_augmentation_options)
def graph_fn():
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(10, 10, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[.5, .5, 1., 1.]], np.float32))
}
augmented_tensor_dict = data_augmentation_fn(tensor_dict=tensor_dict)
return (augmented_tensor_dict[fields.InputDataFields.image],
augmented_tensor_dict[fields.InputDataFields.
groundtruth_boxes])
image, groundtruth_boxes = self.execute_cpu(graph_fn, [])
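    # resize_image produces a 20x20 image, and scale_boxes_to_pixel_coordinates
    # converts the normalized box [.5, .5, 1., 1.] into pixel coordinates of
    # that resized image, i.e. [10, 10, 20, 20].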
self.assertAllEqual(image.shape, [20, 20, 3])
self.assertAllClose(groundtruth_boxes, [[10, 10, 20, 20]])
def test_apply_image_and_box_augmentation_with_scores(self):
data_augmentation_options = [
(preprocessor.resize_image, {
'new_height': 20,
'new_width': 20,
'method': tf.image.ResizeMethod.NEAREST_NEIGHBOR
}),
(preprocessor.scale_boxes_to_pixel_coordinates, {}),
]
data_augmentation_fn = functools.partial(
inputs.augment_input_data,
data_augmentation_options=data_augmentation_options)
def graph_fn():
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(10, 10, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[.5, .5, 1., 1.]], np.float32)),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([1.0], np.float32)),
fields.InputDataFields.groundtruth_weights:
tf.constant(np.array([0.8], np.float32)),
}
augmented_tensor_dict = data_augmentation_fn(tensor_dict=tensor_dict)
return (augmented_tensor_dict[fields.InputDataFields.image],
augmented_tensor_dict[fields.InputDataFields.groundtruth_boxes],
augmented_tensor_dict[fields.InputDataFields.groundtruth_classes],
augmented_tensor_dict[fields.InputDataFields.groundtruth_weights])
(image, groundtruth_boxes,
groundtruth_classes, groundtruth_weights) = self.execute_cpu(graph_fn, [])
self.assertAllEqual(image.shape, [20, 20, 3])
self.assertAllClose(groundtruth_boxes, [[10, 10, 20, 20]])
    self.assertAllEqual(groundtruth_classes.shape, [1])
self.assertAllClose(groundtruth_weights, [0.8])
def test_include_masks_in_data_augmentation(self):
data_augmentation_options = [
(preprocessor.resize_image, {
'new_height': 20,
'new_width': 20,
'method': tf.image.ResizeMethod.NEAREST_NEIGHBOR
})
]
data_augmentation_fn = functools.partial(
inputs.augment_input_data,
data_augmentation_options=data_augmentation_options)
def graph_fn():
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(10, 10, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_instance_masks:
tf.constant(np.zeros([2, 10, 10], np.uint8)),
fields.InputDataFields.groundtruth_instance_mask_weights:
tf.constant([1.0, 0.0], np.float32)
}
augmented_tensor_dict = data_augmentation_fn(tensor_dict=tensor_dict)
return (augmented_tensor_dict[fields.InputDataFields.image],
augmented_tensor_dict[fields.InputDataFields.
groundtruth_instance_masks],
augmented_tensor_dict[fields.InputDataFields.
groundtruth_instance_mask_weights])
image, masks, mask_weights = self.execute_cpu(graph_fn, [])
self.assertAllEqual(image.shape, [20, 20, 3])
self.assertAllEqual(masks.shape, [2, 20, 20])
self.assertAllClose(mask_weights, [1.0, 0.0])
def test_include_keypoints_in_data_augmentation(self):
data_augmentation_options = [
(preprocessor.resize_image, {
'new_height': 20,
'new_width': 20,
'method': tf.image.ResizeMethod.NEAREST_NEIGHBOR
}),
(preprocessor.scale_boxes_to_pixel_coordinates, {}),
]
data_augmentation_fn = functools.partial(
inputs.augment_input_data,
data_augmentation_options=data_augmentation_options)
def graph_fn():
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(10, 10, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[.5, .5, 1., 1.]], np.float32)),
fields.InputDataFields.groundtruth_keypoints:
tf.constant(np.array([[[0.5, 1.0], [0.5, 0.5]]], np.float32))
}
augmented_tensor_dict = data_augmentation_fn(tensor_dict=tensor_dict)
return (augmented_tensor_dict[fields.InputDataFields.image],
augmented_tensor_dict[fields.InputDataFields.groundtruth_boxes],
augmented_tensor_dict[fields.InputDataFields.
groundtruth_keypoints])
image, boxes, keypoints = self.execute_cpu(graph_fn, [])
self.assertAllEqual(image.shape, [20, 20, 3])
self.assertAllClose(boxes, [[10, 10, 20, 20]])
self.assertAllClose(keypoints, [[[10, 20], [10, 10]]])
def _fake_model_preprocessor_fn(image):
return (image, tf.expand_dims(tf.shape(image)[1:], axis=0))
def _fake_image_resizer_fn(image, mask):
return (image, mask, tf.shape(image))
def _fake_resize50_preprocess_fn(image):
image = image[0]
image, shape = preprocessor.resize_to_range(
image, min_dimension=50, max_dimension=50, pad_to_max_dimension=True)
return tf.expand_dims(image, 0), tf.expand_dims(shape, axis=0)
class DataTransformationFnTest(test_case.TestCase, parameterized.TestCase):
def test_combine_additional_channels_if_present(self):
image = np.random.rand(4, 4, 3).astype(np.float32)
additional_channels = np.random.rand(4, 4, 2).astype(np.float32)
def graph_fn(image, additional_channels):
tensor_dict = {
fields.InputDataFields.image: image,
fields.InputDataFields.image_additional_channels: additional_channels,
fields.InputDataFields.groundtruth_classes:
tf.constant([1, 1], tf.int32)
}
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=1)
out_tensors = input_transformation_fn(tensor_dict=tensor_dict)
return out_tensors[fields.InputDataFields.image]
out_image = self.execute_cpu(graph_fn, [image, additional_channels])
self.assertAllEqual(out_image.dtype, tf.float32)
self.assertAllEqual(out_image.shape, [4, 4, 5])
self.assertAllClose(out_image, np.concatenate((image, additional_channels),
axis=2))
def test_use_multiclass_scores_when_present(self):
def graph_fn():
tensor_dict = {
fields.InputDataFields.image: tf.constant(np.random.rand(4, 4, 3).
astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[.5, .5, 1, 1], [.5, .5, 1, 1]],
np.float32)),
fields.InputDataFields.multiclass_scores:
tf.constant(np.array([0.2, 0.3, 0.5, 0.1, 0.6, 0.3], np.float32)),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([1, 2], np.int32))
}
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=3, use_multiclass_scores=True)
transformed_inputs = input_transformation_fn(tensor_dict=tensor_dict)
return transformed_inputs[fields.InputDataFields.groundtruth_classes]
groundtruth_classes = self.execute_cpu(graph_fn, [])
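    # With use_multiclass_scores=True, the flat multiclass_scores tensor is
    # reshaped to [num_boxes, num_classes] and replaces the one-hot encoding
    # of groundtruth_classes.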
self.assertAllClose(
np.array([[0.2, 0.3, 0.5], [0.1, 0.6, 0.3]], np.float32),
groundtruth_classes)
@unittest.skipIf(tf_version.is_tf2(), ('Skipping due to different behaviour '
'in TF 2.X'))
def test_use_multiclass_scores_when_not_present(self):
def graph_fn():
zero_num_elements = tf.random.uniform([], minval=0, maxval=1,
dtype=tf.int32)
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(4, 4, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[.5, .5, 1, 1], [.5, .5, 1, 1]],
np.float32)),
fields.InputDataFields.multiclass_scores: tf.zeros(zero_num_elements),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([1, 2], np.int32))
}
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=3, use_multiclass_scores=True)
transformed_inputs = input_transformation_fn(tensor_dict=tensor_dict)
return transformed_inputs[fields.InputDataFields.groundtruth_classes]
groundtruth_classes = self.execute_cpu(graph_fn, [])
self.assertAllClose(
np.array([[0, 1, 0], [0, 0, 1]], np.float32),
groundtruth_classes)
@parameterized.parameters(
{'labeled_classes': [1, 2]},
{'labeled_classes': []},
{'labeled_classes': [1, -1, 2]} # -1 denotes an unrecognized class
)
def test_use_labeled_classes(self, labeled_classes):
def compute_fn(image, groundtruth_boxes, groundtruth_classes,
groundtruth_labeled_classes):
tensor_dict = {
fields.InputDataFields.image:
image,
fields.InputDataFields.groundtruth_boxes:
groundtruth_boxes,
fields.InputDataFields.groundtruth_classes:
groundtruth_classes,
fields.InputDataFields.groundtruth_labeled_classes:
groundtruth_labeled_classes
}
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=3)
return input_transformation_fn(tensor_dict=tensor_dict)
image = np.random.rand(4, 4, 3).astype(np.float32)
groundtruth_boxes = np.array([[.5, .5, 1, 1], [.5, .5, 1, 1]], np.float32)
groundtruth_classes = np.array([1, 2], np.int32)
groundtruth_labeled_classes = np.array(labeled_classes, np.int32)
transformed_inputs = self.execute_cpu(compute_fn, [
image, groundtruth_boxes, groundtruth_classes,
groundtruth_labeled_classes
])
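    # A non-empty labeled_classes list is converted to a multi-hot vector over
    # num_classes (class ids 1 and 2 set, class 3 unset), with unrecognized
    # ids (-1) ignored. An empty list means all classes are exhaustively
    # labeled, so every entry is set to 1.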
if labeled_classes == [1, 2] or labeled_classes == [1, -1, 2]:
transformed_labeled_classes = [1, 1, 0]
elif not labeled_classes:
transformed_labeled_classes = [1, 1, 1]
else:
      self.fail('Unexpected labeled_classes %r' % labeled_classes)
self.assertAllEqual(
np.array(transformed_labeled_classes, np.float32),
transformed_inputs[fields.InputDataFields.groundtruth_labeled_classes])
def test_returns_correct_class_label_encodings(self):
def graph_fn():
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(4, 4, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[0, 0, 1, 1], [.5, .5, 1, 1]], np.float32)),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([3, 1], np.int32))
}
num_classes = 3
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes)
transformed_inputs = input_transformation_fn(tensor_dict=tensor_dict)
return (transformed_inputs[fields.InputDataFields.groundtruth_classes],
transformed_inputs[fields.InputDataFields.
groundtruth_confidences])
(groundtruth_classes, groundtruth_confidences) = self.execute_cpu(graph_fn,
[])
self.assertAllClose(groundtruth_classes, [[0, 0, 1], [1, 0, 0]])
self.assertAllClose(groundtruth_confidences, [[0, 0, 1], [1, 0, 0]])
def test_returns_correct_labels_with_unrecognized_class(self):
def graph_fn():
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(4, 4, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(
np.array([[0, 0, 1, 1], [.2, .2, 4, 4], [.5, .5, 1, 1]],
np.float32)),
fields.InputDataFields.groundtruth_area:
tf.constant(np.array([.5, .4, .3])),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([3, -1, 1], np.int32)),
fields.InputDataFields.groundtruth_keypoints:
tf.constant(
np.array([[[.1, .1]], [[.2, .2]], [[.5, .5]]],
np.float32)),
fields.InputDataFields.groundtruth_keypoint_visibilities:
tf.constant([[True, True], [False, False], [True, True]]),
fields.InputDataFields.groundtruth_instance_masks:
tf.constant(np.random.rand(3, 4, 4).astype(np.float32)),
fields.InputDataFields.groundtruth_is_crowd:
tf.constant([False, True, False]),
fields.InputDataFields.groundtruth_difficult:
tf.constant(np.array([0, 0, 1], np.int32))
}
num_classes = 3
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes)
transformed_inputs = input_transformation_fn(tensor_dict)
return (transformed_inputs[fields.InputDataFields.groundtruth_classes],
transformed_inputs[fields.InputDataFields.num_groundtruth_boxes],
transformed_inputs[fields.InputDataFields.groundtruth_area],
transformed_inputs[fields.InputDataFields.
groundtruth_confidences],
transformed_inputs[fields.InputDataFields.groundtruth_boxes],
transformed_inputs[fields.InputDataFields.groundtruth_keypoints],
transformed_inputs[fields.InputDataFields.
groundtruth_keypoint_visibilities],
transformed_inputs[fields.InputDataFields.
groundtruth_instance_masks],
transformed_inputs[fields.InputDataFields.groundtruth_is_crowd],
transformed_inputs[fields.InputDataFields.groundtruth_difficult])
(groundtruth_classes, num_groundtruth_boxes, groundtruth_area,
groundtruth_confidences, groundtruth_boxes, groundtruth_keypoints,
groundtruth_keypoint_visibilities, groundtruth_instance_masks,
groundtruth_is_crowd, groundtruth_difficult) = self.execute_cpu(graph_fn,
[])
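    # The second box carries the unrecognized class -1, so it and all of its
    # associated fields (area, keypoints, masks, is_crowd, difficult) are
    # filtered out, leaving two groundtruth entries.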
self.assertAllClose(groundtruth_classes, [[0, 0, 1], [1, 0, 0]])
self.assertAllEqual(num_groundtruth_boxes, 2)
self.assertAllClose(groundtruth_area, [.5, .3])
self.assertAllEqual(groundtruth_confidences, [[0, 0, 1], [1, 0, 0]])
self.assertAllClose(groundtruth_boxes, [[0, 0, 1, 1], [.5, .5, 1, 1]])
self.assertAllClose(groundtruth_keypoints, [[[.1, .1]], [[.5, .5]]])
self.assertAllEqual(groundtruth_keypoint_visibilities,
[[True, True], [True, True]])
self.assertAllEqual(groundtruth_instance_masks.shape, [2, 4, 4])
self.assertAllEqual(groundtruth_is_crowd, [False, False])
self.assertAllEqual(groundtruth_difficult, [0, 1])
def test_returns_correct_merged_boxes(self):
def graph_fn():
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(4, 4, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[.5, .5, 1, 1], [.5, .5, 1, 1]],
np.float32)),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([3, 1], np.int32))
}
num_classes = 3
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes,
merge_multiple_boxes=True)
transformed_inputs = input_transformation_fn(tensor_dict)
return (transformed_inputs[fields.InputDataFields.groundtruth_boxes],
transformed_inputs[fields.InputDataFields.groundtruth_classes],
transformed_inputs[fields.InputDataFields.
groundtruth_confidences],
transformed_inputs[fields.InputDataFields.num_groundtruth_boxes])
(groundtruth_boxes, groundtruth_classes, groundtruth_confidences,
num_groundtruth_boxes) = self.execute_cpu(graph_fn, [])
self.assertAllClose(
groundtruth_boxes,
[[.5, .5, 1., 1.]])
self.assertAllClose(
groundtruth_classes,
[[1, 0, 1]])
self.assertAllClose(
groundtruth_confidences,
[[1, 0, 1]])
self.assertAllClose(
num_groundtruth_boxes,
1)
def test_returns_correct_groundtruth_confidences_when_input_present(self):
def graph_fn():
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(4, 4, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[0, 0, 1, 1], [.5, .5, 1, 1]], np.float32)),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([3, 1], np.int32)),
fields.InputDataFields.groundtruth_confidences:
tf.constant(np.array([1.0, -1.0], np.float32))
}
num_classes = 3
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes)
transformed_inputs = input_transformation_fn(tensor_dict)
return (transformed_inputs[fields.InputDataFields.groundtruth_classes],
transformed_inputs[fields.InputDataFields.
groundtruth_confidences])
groundtruth_classes, groundtruth_confidences = self.execute_cpu(graph_fn,
[])
self.assertAllClose(
groundtruth_classes,
[[0, 0, 1], [1, 0, 0]])
self.assertAllClose(
groundtruth_confidences,
[[0, 0, 1], [-1, 0, 0]])
def test_returns_resized_masks(self):
def graph_fn():
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(4, 4, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_instance_masks:
tf.constant(np.random.rand(2, 4, 4).astype(np.float32)),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([3, 1], np.int32)),
fields.InputDataFields.original_image_spatial_shape:
tf.constant(np.array([4, 4], np.int32))
}
def fake_image_resizer_fn(image, masks=None):
resized_image = tf.image.resize_images(image, [8, 8])
results = [resized_image]
if masks is not None:
resized_masks = tf.transpose(
tf.image.resize_images(tf.transpose(masks, [1, 2, 0]), [8, 8]),
[2, 0, 1])
results.append(resized_masks)
results.append(tf.shape(resized_image))
return results
num_classes = 3
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=fake_image_resizer_fn,
num_classes=num_classes,
retain_original_image=True)
transformed_inputs = input_transformation_fn(tensor_dict)
return (transformed_inputs[fields.InputDataFields.original_image],
transformed_inputs[fields.InputDataFields.
original_image_spatial_shape],
transformed_inputs[fields.InputDataFields.
groundtruth_instance_masks])
(original_image, original_image_shape,
groundtruth_instance_masks) = self.execute_cpu(graph_fn, [])
self.assertEqual(original_image.dtype, np.uint8)
self.assertAllEqual(original_image_shape, [4, 4])
self.assertAllEqual(original_image.shape, [8, 8, 3])
self.assertAllEqual(groundtruth_instance_masks.shape, [2, 8, 8])
def test_applies_model_preprocess_fn_to_image_tensor(self):
np_image = np.random.randint(256, size=(4, 4, 3))
def graph_fn(image):
tensor_dict = {
fields.InputDataFields.image: image,
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([3, 1], np.int32))
}
def fake_model_preprocessor_fn(image):
return (image / 255., tf.expand_dims(tf.shape(image)[1:], axis=0))
num_classes = 3
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes)
transformed_inputs = input_transformation_fn(tensor_dict)
return (transformed_inputs[fields.InputDataFields.image],
transformed_inputs[fields.InputDataFields.true_image_shape])
image, true_image_shape = self.execute_cpu(graph_fn, [np_image])
self.assertAllClose(image, np_image / 255.)
self.assertAllClose(true_image_shape, [4, 4, 3])
def test_applies_data_augmentation_fn_to_tensor_dict(self):
np_image = np.random.randint(256, size=(4, 4, 3))
def graph_fn(image):
tensor_dict = {
fields.InputDataFields.image: image,
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([3, 1], np.int32))
}
def add_one_data_augmentation_fn(tensor_dict):
return {key: value + 1 for key, value in tensor_dict.items()}
num_classes = 4
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes,
data_augmentation_fn=add_one_data_augmentation_fn)
transformed_inputs = input_transformation_fn(tensor_dict)
return (transformed_inputs[fields.InputDataFields.image],
transformed_inputs[fields.InputDataFields.groundtruth_classes])
image, groundtruth_classes = self.execute_cpu(graph_fn, [np_image])
self.assertAllEqual(image, np_image + 1)
self.assertAllEqual(
groundtruth_classes,
[[0, 0, 0, 1], [0, 1, 0, 0]])
def test_applies_data_augmentation_fn_before_model_preprocess_fn(self):
np_image = np.random.randint(256, size=(4, 4, 3))
def graph_fn(image):
tensor_dict = {
fields.InputDataFields.image: image,
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([3, 1], np.int32))
}
def mul_two_model_preprocessor_fn(image):
return (image * 2, tf.expand_dims(tf.shape(image)[1:], axis=0))
def add_five_to_image_data_augmentation_fn(tensor_dict):
tensor_dict[fields.InputDataFields.image] += 5
return tensor_dict
num_classes = 4
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=mul_two_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes,
data_augmentation_fn=add_five_to_image_data_augmentation_fn)
transformed_inputs = input_transformation_fn(tensor_dict)
return transformed_inputs[fields.InputDataFields.image]
image = self.execute_cpu(graph_fn, [np_image])
self.assertAllEqual(image, (np_image + 5) * 2)
def test_resize_with_padding(self):
def graph_fn():
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(100, 50, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[.5, .5, 1, 1], [.0, .0, .5, .5]],
np.float32)),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([1, 2], np.int32)),
fields.InputDataFields.groundtruth_keypoints:
tf.constant([[[0.1, 0.2]], [[0.3, 0.4]]]),
}
num_classes = 3
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_resize50_preprocess_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes,)
transformed_inputs = input_transformation_fn(tensor_dict)
return (transformed_inputs[fields.InputDataFields.groundtruth_boxes],
transformed_inputs[fields.InputDataFields.groundtruth_keypoints])
groundtruth_boxes, groundtruth_keypoints = self.execute_cpu(graph_fn, [])
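    # _fake_resize50_preprocess_fn scales the 100x50 input to 50x25 and pads
    # it to 50x50, so normalized x-coordinates of boxes and keypoints are
    # halved while y-coordinates are unchanged.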
self.assertAllClose(
groundtruth_boxes,
[[.5, .25, 1., .5], [.0, .0, .5, .25]])
self.assertAllClose(
groundtruth_keypoints,
[[[.1, .1]], [[.3, .2]]])
def test_groundtruth_keypoint_weights(self):
def graph_fn():
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(100, 50, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[.5, .5, 1, 1], [.0, .0, .5, .5]],
np.float32)),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([1, 2], np.int32)),
fields.InputDataFields.groundtruth_keypoints:
tf.constant([[[0.1, 0.2], [0.3, 0.4]],
[[0.5, 0.6], [0.7, 0.8]]]),
fields.InputDataFields.groundtruth_keypoint_visibilities:
tf.constant([[True, False], [True, True]]),
}
num_classes = 3
keypoint_type_weight = [1.0, 2.0]
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_resize50_preprocess_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes,
keypoint_type_weight=keypoint_type_weight)
transformed_inputs = input_transformation_fn(tensor_dict=tensor_dict)
return (transformed_inputs[fields.InputDataFields.groundtruth_keypoints],
transformed_inputs[fields.InputDataFields.
groundtruth_keypoint_weights])
groundtruth_keypoints, groundtruth_keypoint_weights = self.execute_cpu(
graph_fn, [])
self.assertAllClose(
groundtruth_keypoints,
[[[0.1, 0.1], [0.3, 0.2]],
[[0.5, 0.3], [0.7, 0.4]]])
self.assertAllClose(
groundtruth_keypoint_weights,
[[1.0, 0.0], [1.0, 2.0]])
def test_groundtruth_keypoint_weights_default(self):
def graph_fn():
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(100, 50, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[.5, .5, 1, 1], [.0, .0, .5, .5]],
np.float32)),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([1, 2], np.int32)),
fields.InputDataFields.groundtruth_keypoints:
tf.constant([[[0.1, 0.2], [0.3, 0.4]],
[[0.5, 0.6], [0.7, 0.8]]]),
}
num_classes = 3
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_resize50_preprocess_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes)
transformed_inputs = input_transformation_fn(tensor_dict=tensor_dict)
return (transformed_inputs[fields.InputDataFields.groundtruth_keypoints],
transformed_inputs[fields.InputDataFields.
groundtruth_keypoint_weights])
groundtruth_keypoints, groundtruth_keypoint_weights = self.execute_cpu(
graph_fn, [])
self.assertAllClose(
groundtruth_keypoints,
[[[0.1, 0.1], [0.3, 0.2]],
[[0.5, 0.3], [0.7, 0.4]]])
self.assertAllClose(
groundtruth_keypoint_weights,
[[1.0, 1.0], [1.0, 1.0]])
def test_groundtruth_dense_pose(self):
def graph_fn():
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(100, 50, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[.5, .5, 1, 1], [.0, .0, .5, .5]],
np.float32)),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([1, 2], np.int32)),
fields.InputDataFields.groundtruth_dp_num_points:
tf.constant([0, 2], dtype=tf.int32),
fields.InputDataFields.groundtruth_dp_part_ids:
tf.constant([[0, 0], [4, 23]], dtype=tf.int32),
fields.InputDataFields.groundtruth_dp_surface_coords:
tf.constant([[[0., 0., 0., 0.,], [0., 0., 0., 0.,]],
[[0.1, 0.2, 0.3, 0.4,], [0.6, 0.8, 0.6, 0.7,]]],
dtype=tf.float32),
}
num_classes = 1
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_resize50_preprocess_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes)
transformed_inputs = input_transformation_fn(tensor_dict=tensor_dict)
transformed_dp_num_points = transformed_inputs[
fields.InputDataFields.groundtruth_dp_num_points]
transformed_dp_part_ids = transformed_inputs[
fields.InputDataFields.groundtruth_dp_part_ids]
transformed_dp_surface_coords = transformed_inputs[
fields.InputDataFields.groundtruth_dp_surface_coords]
return (transformed_dp_num_points, transformed_dp_part_ids,
transformed_dp_surface_coords)
dp_num_points, dp_part_ids, dp_surface_coords = self.execute_cpu(
graph_fn, [])
self.assertAllEqual(dp_num_points, [0, 2])
self.assertAllEqual(dp_part_ids, [[0, 0], [4, 23]])
self.assertAllClose(
dp_surface_coords,
[[[0., 0., 0., 0.,], [0., 0., 0., 0.,]],
[[0.1, 0.1, 0.3, 0.4,], [0.6, 0.4, 0.6, 0.7,]]])
def test_groundtruth_keypoint_depths(self):
def graph_fn():
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(100, 50, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[.5, .5, 1, 1], [.0, .0, .5, .5]],
np.float32)),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([1, 2], np.int32)),
fields.InputDataFields.groundtruth_keypoints:
tf.constant([[[0.1, 0.2], [0.3, 0.4]],
[[0.5, 0.6], [0.7, 0.8]]]),
fields.InputDataFields.groundtruth_keypoint_visibilities:
tf.constant([[True, False], [True, True]]),
fields.InputDataFields.groundtruth_keypoint_depths:
tf.constant([[1.0, 0.9], [0.8, 0.7]]),
fields.InputDataFields.groundtruth_keypoint_depth_weights:
tf.constant([[0.7, 0.8], [0.9, 1.0]]),
}
num_classes = 3
keypoint_type_weight = [1.0, 2.0]
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_resize50_preprocess_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes,
keypoint_type_weight=keypoint_type_weight)
transformed_inputs = input_transformation_fn(tensor_dict=tensor_dict)
return (transformed_inputs[
fields.InputDataFields.groundtruth_keypoint_depths],
transformed_inputs[
fields.InputDataFields.groundtruth_keypoint_depth_weights])
keypoint_depths, keypoint_depth_weights = self.execute_cpu(graph_fn, [])
self.assertAllClose(
keypoint_depths,
[[1.0, 0.9], [0.8, 0.7]])
self.assertAllClose(
keypoint_depth_weights,
[[0.7, 0.8], [0.9, 1.0]])
class PadInputDataToStaticShapesFnTest(test_case.TestCase):
def test_pad_images_boxes_and_classes(self):
input_tensor_dict = {
fields.InputDataFields.image:
tf.random.uniform([3, 3, 3]),
fields.InputDataFields.groundtruth_boxes:
tf.random.uniform([2, 4]),
fields.InputDataFields.groundtruth_classes:
tf.random.uniform([2, 3], minval=0, maxval=2, dtype=tf.int32),
fields.InputDataFields.true_image_shape:
tf.constant([3, 3, 3]),
fields.InputDataFields.original_image_spatial_shape:
tf.constant([3, 3])
}
padded_tensor_dict = inputs.pad_input_data_to_static_shapes(
tensor_dict=input_tensor_dict,
max_num_boxes=3,
num_classes=3,
spatial_image_shape=[5, 6])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.image].shape.as_list(),
[5, 6, 3])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.true_image_shape]
.shape.as_list(), [3])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.original_image_spatial_shape]
.shape.as_list(), [2])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.groundtruth_boxes]
.shape.as_list(), [3, 4])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.groundtruth_classes]
.shape.as_list(), [3, 3])
def test_clip_boxes_and_classes(self):
def graph_fn():
input_tensor_dict = {
fields.InputDataFields.groundtruth_boxes:
tf.random.uniform([5, 4]),
fields.InputDataFields.groundtruth_classes:
tf.random.uniform([2, 3], maxval=10, dtype=tf.int32),
fields.InputDataFields.num_groundtruth_boxes:
tf.constant(5)
}
padded_tensor_dict = inputs.pad_input_data_to_static_shapes(
tensor_dict=input_tensor_dict,
max_num_boxes=3,
num_classes=3,
spatial_image_shape=[5, 6])
return (padded_tensor_dict[fields.InputDataFields.groundtruth_boxes],
padded_tensor_dict[fields.InputDataFields.groundtruth_classes],
padded_tensor_dict[fields.InputDataFields.num_groundtruth_boxes])
(groundtruth_boxes, groundtruth_classes,
num_groundtruth_boxes) = self.execute_cpu(graph_fn, [])
self.assertAllEqual(groundtruth_boxes.shape, [3, 4])
self.assertAllEqual(groundtruth_classes.shape, [3, 3])
self.assertEqual(num_groundtruth_boxes, 3)
def test_images_and_additional_channels(self):
input_tensor_dict = {
fields.InputDataFields.image:
test_utils.image_with_dynamic_shape(4, 3, 5),
fields.InputDataFields.image_additional_channels:
test_utils.image_with_dynamic_shape(4, 3, 2),
}
padded_tensor_dict = inputs.pad_input_data_to_static_shapes(
tensor_dict=input_tensor_dict,
max_num_boxes=3,
num_classes=3,
spatial_image_shape=[5, 6])
# pad_input_data_to_static_shape assumes that image is already concatenated
# with additional channels.
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.image].shape.as_list(),
[5, 6, 5])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.image_additional_channels]
.shape.as_list(), [5, 6, 2])
def test_images_and_additional_channels_errors(self):
input_tensor_dict = {
fields.InputDataFields.image:
test_utils.image_with_dynamic_shape(10, 10, 3),
fields.InputDataFields.image_additional_channels:
test_utils.image_with_dynamic_shape(10, 10, 2),
fields.InputDataFields.original_image:
test_utils.image_with_dynamic_shape(10, 10, 3),
}
with self.assertRaises(ValueError):
_ = inputs.pad_input_data_to_static_shapes(
tensor_dict=input_tensor_dict,
max_num_boxes=3,
num_classes=3,
spatial_image_shape=[5, 6])
def test_gray_images(self):
input_tensor_dict = {
fields.InputDataFields.image:
test_utils.image_with_dynamic_shape(4, 4, 1),
}
padded_tensor_dict = inputs.pad_input_data_to_static_shapes(
tensor_dict=input_tensor_dict,
max_num_boxes=3,
num_classes=3,
spatial_image_shape=[5, 6])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.image].shape.as_list(),
[5, 6, 1])
def test_gray_images_and_additional_channels(self):
input_tensor_dict = {
fields.InputDataFields.image:
test_utils.image_with_dynamic_shape(4, 4, 3),
fields.InputDataFields.image_additional_channels:
test_utils.image_with_dynamic_shape(4, 4, 2),
}
# pad_input_data_to_static_shape assumes that image is already concatenated
# with additional channels.
padded_tensor_dict = inputs.pad_input_data_to_static_shapes(
tensor_dict=input_tensor_dict,
max_num_boxes=3,
num_classes=3,
spatial_image_shape=[5, 6])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.image].shape.as_list(),
[5, 6, 3])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.image_additional_channels]
.shape.as_list(), [5, 6, 2])
def test_keypoints(self):
keypoints = test_utils.keypoints_with_dynamic_shape(10, 16, 4)
visibilities = tf.cast(tf.random.uniform(tf.shape(keypoints)[:-1], minval=0,
maxval=2, dtype=tf.int32), tf.bool)
input_tensor_dict = {
fields.InputDataFields.groundtruth_keypoints:
test_utils.keypoints_with_dynamic_shape(10, 16, 4),
fields.InputDataFields.groundtruth_keypoint_visibilities:
visibilities
}
padded_tensor_dict = inputs.pad_input_data_to_static_shapes(
tensor_dict=input_tensor_dict,
max_num_boxes=3,
num_classes=3,
spatial_image_shape=[5, 6])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.groundtruth_keypoints]
.shape.as_list(), [3, 16, 4])
self.assertAllEqual(
padded_tensor_dict[
fields.InputDataFields.groundtruth_keypoint_visibilities]
.shape.as_list(), [3, 16])
def test_dense_pose(self):
input_tensor_dict = {
fields.InputDataFields.groundtruth_dp_num_points:
tf.constant([0, 2], dtype=tf.int32),
fields.InputDataFields.groundtruth_dp_part_ids:
tf.constant([[0, 0], [4, 23]], dtype=tf.int32),
fields.InputDataFields.groundtruth_dp_surface_coords:
tf.constant([[[0., 0., 0., 0.,], [0., 0., 0., 0.,]],
[[0.1, 0.2, 0.3, 0.4,], [0.6, 0.8, 0.6, 0.7,]]],
dtype=tf.float32),
}
padded_tensor_dict = inputs.pad_input_data_to_static_shapes(
tensor_dict=input_tensor_dict,
max_num_boxes=3,
num_classes=1,
spatial_image_shape=[128, 128],
max_dp_points=200)
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.groundtruth_dp_num_points]
.shape.as_list(), [3])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.groundtruth_dp_part_ids]
.shape.as_list(), [3, 200])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.groundtruth_dp_surface_coords]
.shape.as_list(), [3, 200, 4])
def test_pad_input_data_to_static_shapes_for_trackid(self):
input_tensor_dict = {
fields.InputDataFields.groundtruth_track_ids:
tf.constant([0, 1], dtype=tf.int32),
}
padded_tensor_dict = inputs.pad_input_data_to_static_shapes(
tensor_dict=input_tensor_dict,
max_num_boxes=3,
num_classes=1,
spatial_image_shape=[128, 128])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.groundtruth_track_ids]
.shape.as_list(), [3])
def test_context_features(self):
context_memory_size = 8
context_feature_length = 10
max_num_context_features = 20
def graph_fn():
input_tensor_dict = {
fields.InputDataFields.context_features:
tf.ones([context_memory_size, context_feature_length]),
fields.InputDataFields.context_feature_length:
tf.constant(context_feature_length)
}
padded_tensor_dict = inputs.pad_input_data_to_static_shapes(
tensor_dict=input_tensor_dict,
max_num_boxes=3,
num_classes=3,
spatial_image_shape=[5, 6],
max_num_context_features=max_num_context_features,
context_feature_length=context_feature_length)
self.assertAllEqual(
padded_tensor_dict[
fields.InputDataFields.context_features].shape.as_list(),
[max_num_context_features, context_feature_length])
return padded_tensor_dict[fields.InputDataFields.valid_context_size]
valid_context_size = self.execute_cpu(graph_fn, [])
self.assertEqual(valid_context_size, context_memory_size)
class NegativeSizeTest(test_case.TestCase):
"""Test for inputs and related funcitons."""
def test_negative_size_error(self):
"""Test that error is raised for negative size boxes."""
def graph_fn():
tensors = {
fields.InputDataFields.image: tf.zeros((128, 128, 3)),
fields.InputDataFields.groundtruth_classes:
tf.constant([1, 1], tf.int32),
fields.InputDataFields.groundtruth_boxes:
tf.constant([[0.5, 0.5, 0.4, 0.5]], tf.float32)
}
tensors = inputs.transform_input_data(
tensors, _fake_model_preprocessor_fn, _fake_image_resizer_fn,
num_classes=10)
return tensors[fields.InputDataFields.groundtruth_boxes]
with self.assertRaises(tf.errors.InvalidArgumentError):
self.execute_cpu(graph_fn, [])
def test_negative_size_no_assert(self):
"""Test that negative size boxes are filtered out without assert.
This test simulates the behaviour when we run on TPU and Assert ops are
not supported.
"""
tensors = {
fields.InputDataFields.image: tf.zeros((128, 128, 3)),
fields.InputDataFields.groundtruth_classes:
tf.constant([1, 1], tf.int32),
fields.InputDataFields.groundtruth_boxes:
tf.constant([[0.5, 0.5, 0.4, 0.5], [0.5, 0.5, 0.6, 0.6]],
tf.float32)
}
with mock.patch.object(tf, 'Assert') as tf_assert:
tf_assert.return_value = tf.no_op()
tensors = inputs.transform_input_data(
tensors, _fake_model_preprocessor_fn, _fake_image_resizer_fn,
num_classes=10)
self.assertAllClose(tensors[fields.InputDataFields.groundtruth_boxes],
[[0.5, 0.5, 0.6, 0.6]])
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/inputs_test.py | inputs_test.py |
# Lint as: python2, python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Exports TF2 detection SavedModel for conversion to TensorFlow Lite.
Link to the TF2 Detection Zoo:
https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/tf2_detection_zoo.md
The output folder will contain an intermediate SavedModel that can be used with
the TfLite converter.
NOTE: This supports only SSD and CenterNet meta-architectures for now.
One input:
image: a float32 tensor of shape[1, height, width, 3] containing the
*normalized* input image.
NOTE: See the `preprocess` function defined in the feature extractor class
in the object_detection/models directory.
Four Outputs:
detection_boxes: a float32 tensor of shape [1, num_boxes, 4] with box
locations
detection_classes: a float32 tensor of shape [1, num_boxes]
with class indices
detection_scores: a float32 tensor of shape [1, num_boxes]
with class scores
num_boxes: a float32 tensor of size 1 containing the number of detected boxes
Example Usage:
--------------
python object_detection/export_tflite_graph_tf2.py \
--pipeline_config_path path/to/ssd_model/pipeline.config \
--trained_checkpoint_dir path/to/ssd_model/checkpoint \
--output_directory path/to/exported_model_directory
The expected output SavedModel would be in the directory
path/to/exported_model_directory (which is created if it does not exist).
Config overrides (see the `config_override` flag) are text protobufs
(also of type pipeline_pb2.TrainEvalPipelineConfig) which are used to override
certain fields in the provided pipeline_config_path. These are useful for
making small changes to the inference graph that differ from the training or
eval config.
Example Usage 1 (in which we change the NMS iou_threshold to be 0.5 and
NMS score_threshold to be 0.0):
python object_detection/export_tflite_graph_tf2.py \
--pipeline_config_path path/to/ssd_model/pipeline.config \
--trained_checkpoint_dir path/to/ssd_model/checkpoint \
  --output_directory path/to/exported_model_directory \
--config_override " \
model{ \
ssd{ \
post_processing { \
batch_non_max_suppression { \
score_threshold: 0.0 \
iou_threshold: 0.5 \
} \
} \
} \
} \
"
Example Usage 2 (export CenterNet model for keypoint estimation task with fixed
shape resizer and customized input resolution):
python object_detection/export_tflite_graph_tf2.py \
--pipeline_config_path path/to/ssd_model/pipeline.config \
--trained_checkpoint_dir path/to/ssd_model/checkpoint \
--output_directory path/to/exported_model_directory \
--keypoint_label_map_path path/to/label_map.txt \
--max_detections 10 \
--centernet_include_keypoints true \
--config_override " \
model{ \
center_net { \
image_resizer { \
fixed_shape_resizer { \
height: 320 \
width: 320 \
} \
} \
} \
}" \
"""
from absl import app
from absl import flags
import tensorflow.compat.v2 as tf
from google.protobuf import text_format
from object_detection import export_tflite_graph_lib_tf2
from object_detection.protos import pipeline_pb2
tf.enable_v2_behavior()
FLAGS = flags.FLAGS
flags.DEFINE_string(
'pipeline_config_path', None,
'Path to a pipeline_pb2.TrainEvalPipelineConfig config '
'file.')
flags.DEFINE_string('trained_checkpoint_dir', None,
'Path to trained checkpoint directory')
flags.DEFINE_string('output_directory', None, 'Path to write outputs.')
flags.DEFINE_string(
'config_override', '', 'pipeline_pb2.TrainEvalPipelineConfig '
'text proto to override pipeline_config_path.')
flags.DEFINE_integer('max_detections', 10,
'Maximum number of detections (boxes) to return.')
# SSD-specific flags
flags.DEFINE_bool(
'ssd_use_regular_nms', False,
'Flag to set postprocessing op to use Regular NMS instead of Fast NMS '
'(Default false).')
# CenterNet-specific flags
flags.DEFINE_bool(
'centernet_include_keypoints', False,
'Whether to export the predicted keypoint tensors. Only CenterNet model'
' supports this flag.'
)
flags.DEFINE_string(
'keypoint_label_map_path', None,
'Path of the label map used by CenterNet keypoint estimation task. If'
' provided, the label map path in the pipeline config will be replaced by'
' this one. Note that it is only used when exporting CenterNet model for'
' keypoint estimation task.'
)
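# Illustrative sketch only (an assumed helper for documentation purposes; it
# is not invoked by this script): once the intermediate SavedModel has been
# written by this tool, it can be converted to a .tflite flatbuffer with the
# standard TFLite converter, typically from `<output_directory>/saved_model`.
def _example_convert_saved_model_to_tflite(saved_model_dir, tflite_path):
  """Converts an exported intermediate SavedModel to a TFLite flatbuffer."""
  converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)
  tflite_model = converter.convert()
  with tf.io.gfile.GFile(tflite_path, 'wb') as f:
    f.write(tflite_model)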
def main(argv):
del argv # Unused.
flags.mark_flag_as_required('pipeline_config_path')
flags.mark_flag_as_required('trained_checkpoint_dir')
flags.mark_flag_as_required('output_directory')
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
with tf.io.gfile.GFile(FLAGS.pipeline_config_path, 'r') as f:
text_format.Parse(f.read(), pipeline_config)
override_config = pipeline_pb2.TrainEvalPipelineConfig()
text_format.Parse(FLAGS.config_override, override_config)
pipeline_config.MergeFrom(override_config)
export_tflite_graph_lib_tf2.export_tflite_model(
pipeline_config, FLAGS.trained_checkpoint_dir, FLAGS.output_directory,
FLAGS.max_detections, FLAGS.ssd_use_regular_nms,
FLAGS.centernet_include_keypoints, FLAGS.keypoint_label_map_path)
if __name__ == '__main__':
app.run(main)
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/export_tflite_graph_tf2.py | export_tflite_graph_tf2.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.core.bipartite_matcher."""
import unittest
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.utils import test_case
from object_detection.utils import tf_version
if tf_version.is_tf1():
from object_detection.matchers import bipartite_matcher # pylint: disable=g-import-not-at-top
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class GreedyBipartiteMatcherTest(test_case.TestCase):
def test_get_expected_matches_when_all_rows_are_valid(self):
similarity_matrix = np.array([[0.50, 0.1, 0.8], [0.15, 0.2, 0.3]],
dtype=np.float32)
    valid_rows = np.ones([2], dtype=bool)
expected_match_results = [-1, 1, 0]
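    # Greedy bipartite matching picks the globally highest similarity pair
    # first, (row 0, column 2) with 0.8, then (row 1, column 1) with 0.2;
    # column 0 is left unmatched (-1).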
def graph_fn(similarity_matrix, valid_rows):
matcher = bipartite_matcher.GreedyBipartiteMatcher()
match = matcher.match(similarity_matrix, valid_rows=valid_rows)
return match._match_results
match_results_out = self.execute(graph_fn, [similarity_matrix, valid_rows])
self.assertAllEqual(match_results_out, expected_match_results)
def test_get_expected_matches_with_all_rows_be_default(self):
similarity_matrix = np.array([[0.50, 0.1, 0.8], [0.15, 0.2, 0.3]],
dtype=np.float32)
expected_match_results = [-1, 1, 0]
def graph_fn(similarity_matrix):
matcher = bipartite_matcher.GreedyBipartiteMatcher()
match = matcher.match(similarity_matrix)
return match._match_results
match_results_out = self.execute(graph_fn, [similarity_matrix])
self.assertAllEqual(match_results_out, expected_match_results)
def test_get_no_matches_with_zero_valid_rows(self):
similarity_matrix = np.array([[0.50, 0.1, 0.8], [0.15, 0.2, 0.3]],
dtype=np.float32)
    valid_rows = np.zeros([2], dtype=bool)
expected_match_results = [-1, -1, -1]
def graph_fn(similarity_matrix, valid_rows):
matcher = bipartite_matcher.GreedyBipartiteMatcher()
match = matcher.match(similarity_matrix, valid_rows=valid_rows)
return match._match_results
match_results_out = self.execute(graph_fn, [similarity_matrix, valid_rows])
self.assertAllEqual(match_results_out, expected_match_results)
def test_get_expected_matches_with_only_one_valid_row(self):
similarity_matrix = np.array([[0.50, 0.1, 0.8], [0.15, 0.2, 0.3]],
dtype=np.float32)
    valid_rows = np.array([True, False], dtype=bool)
expected_match_results = [-1, -1, 0]
def graph_fn(similarity_matrix, valid_rows):
matcher = bipartite_matcher.GreedyBipartiteMatcher()
match = matcher.match(similarity_matrix, valid_rows=valid_rows)
return match._match_results
match_results_out = self.execute(graph_fn, [similarity_matrix, valid_rows])
self.assertAllEqual(match_results_out, expected_match_results)
def test_get_expected_matches_with_only_one_valid_row_at_bottom(self):
similarity_matrix = np.array([[0.15, 0.2, 0.3], [0.50, 0.1, 0.8]],
dtype=np.float32)
    valid_rows = np.array([False, True], dtype=bool)
expected_match_results = [-1, -1, 0]
def graph_fn(similarity_matrix, valid_rows):
matcher = bipartite_matcher.GreedyBipartiteMatcher()
match = matcher.match(similarity_matrix, valid_rows=valid_rows)
return match._match_results
match_results_out = self.execute(graph_fn, [similarity_matrix, valid_rows])
self.assertAllEqual(match_results_out, expected_match_results)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/matchers/bipartite_matcher_tf1_test.py | bipartite_matcher_tf1_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Argmax matcher implementation.
This class takes a similarity matrix and matches columns to rows based on the
maximum value per column. One can specify a matched_threshold to prevent
columns from matching to rows (generally resulting in a negative training
example) and an unmatched_threshold to ignore the match (generally resulting
in neither a positive nor a negative training example).
This matcher is used in Fast(er)-RCNN.
Note: matchers are used in TargetAssigners. There is a create_target_assigner
factory function for popular implementations.
"""
import tensorflow.compat.v1 as tf
from object_detection.core import matcher
from object_detection.utils import shape_utils
class ArgMaxMatcher(matcher.Matcher):
"""Matcher based on highest value.
This class computes matches from a similarity matrix. Each column is matched
to a single row.
To support object detection target assignment this class enables setting both
  matched_threshold (upper threshold) and unmatched_threshold (lower threshold),
  defining three categories of similarity that determine whether examples are
positive, negative, or ignored:
(1) similarity >= matched_threshold: Highest similarity. Matched/Positive!
(2) matched_threshold > similarity >= unmatched_threshold: Medium similarity.
Depending on negatives_lower_than_unmatched, this is either
Unmatched/Negative OR Ignore.
(3) unmatched_threshold > similarity: Lowest similarity. Depending on flag
negatives_lower_than_unmatched, either Unmatched/Negative OR Ignore.
For ignored matches this class sets the values in the Match object to -2.
"""
def __init__(self,
matched_threshold,
unmatched_threshold=None,
negatives_lower_than_unmatched=True,
force_match_for_each_row=False,
use_matmul_gather=False):
"""Construct ArgMaxMatcher.
Args:
matched_threshold: Threshold for positive matches. Positive if
sim >= matched_threshold, where sim is the maximum value of the
similarity matrix for a given column. Set to None for no threshold.
unmatched_threshold: Threshold for negative matches. Negative if
sim < unmatched_threshold. Defaults to matched_threshold
when set to None.
negatives_lower_than_unmatched: Boolean which defaults to True. If True
then negative matches are the ones below the unmatched_threshold,
        whereas ignored matches are in between the matched and unmatched
threshold. If False, then negative matches are in between the matched
and unmatched threshold, and everything lower than unmatched is ignored.
force_match_for_each_row: If True, ensures that each row is matched to
at least one column (which is not guaranteed otherwise if the
matched_threshold is high). Defaults to False. See
argmax_matcher_test.testMatcherForceMatch() for an example.
use_matmul_gather: Force constructed match objects to use matrix
multiplication based gather instead of standard tf.gather.
(Default: False).
Raises:
ValueError: if unmatched_threshold is set but matched_threshold is not set
or if unmatched_threshold > matched_threshold.
"""
super(ArgMaxMatcher, self).__init__(use_matmul_gather=use_matmul_gather)
if (matched_threshold is None) and (unmatched_threshold is not None):
      raise ValueError('Need to also define matched_threshold when '
                       'unmatched_threshold is defined')
self._matched_threshold = matched_threshold
if unmatched_threshold is None:
self._unmatched_threshold = matched_threshold
else:
if unmatched_threshold > matched_threshold:
        raise ValueError('unmatched_threshold needs to be smaller or equal '
                         'to matched_threshold')
self._unmatched_threshold = unmatched_threshold
if not negatives_lower_than_unmatched:
if self._unmatched_threshold == self._matched_threshold:
raise ValueError('When negatives are in between matched and '
'unmatched thresholds, these cannot be of equal '
'value. matched: {}, unmatched: {}'.format(
self._matched_threshold,
self._unmatched_threshold))
self._force_match_for_each_row = force_match_for_each_row
self._negatives_lower_than_unmatched = negatives_lower_than_unmatched
def _match(self, similarity_matrix, valid_rows):
"""Tries to match each column of the similarity matrix to a row.
Args:
similarity_matrix: tensor of shape [N, M] representing any similarity
metric.
valid_rows: a boolean tensor of shape [N] indicating valid rows.
Returns:
Match object with corresponding matches for each of M columns.
"""
def _match_when_rows_are_empty():
"""Performs matching when the rows of similarity matrix are empty.
When the rows are empty, all detections are false positives. So we return
a tensor of -1's to indicate that the columns do not match to any rows.
Returns:
matches: int32 tensor indicating the row each column matches to.
"""
similarity_matrix_shape = shape_utils.combined_static_and_dynamic_shape(
similarity_matrix)
return -1 * tf.ones([similarity_matrix_shape[1]], dtype=tf.int32)
def _match_when_rows_are_non_empty():
"""Performs matching when the rows of similarity matrix are non empty.
Returns:
matches: int32 tensor indicating the row each column matches to.
"""
# Matches for each column
matches = tf.argmax(similarity_matrix, 0, output_type=tf.int32)
# Deal with matched and unmatched threshold
if self._matched_threshold is not None:
        # Compute boolean indicators for columns whose best similarity is
        # below the unmatched threshold or falls between the two thresholds.
matched_vals = tf.reduce_max(similarity_matrix, 0)
below_unmatched_threshold = tf.greater(self._unmatched_threshold,
matched_vals)
between_thresholds = tf.logical_and(
tf.greater_equal(matched_vals, self._unmatched_threshold),
tf.greater(self._matched_threshold, matched_vals))
if self._negatives_lower_than_unmatched:
matches = self._set_values_using_indicator(matches,
below_unmatched_threshold,
-1)
matches = self._set_values_using_indicator(matches,
between_thresholds,
-2)
else:
matches = self._set_values_using_indicator(matches,
below_unmatched_threshold,
-2)
matches = self._set_values_using_indicator(matches,
between_thresholds,
-1)
if self._force_match_for_each_row:
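        # Force-match: every valid row claims its highest-similarity column,
        # overriding any threshold-based -1/-2 assignment for that column.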
similarity_matrix_shape = shape_utils.combined_static_and_dynamic_shape(
similarity_matrix)
force_match_column_ids = tf.argmax(similarity_matrix, 1,
output_type=tf.int32)
force_match_column_indicators = (
tf.one_hot(
force_match_column_ids, depth=similarity_matrix_shape[1]) *
tf.cast(tf.expand_dims(valid_rows, axis=-1), dtype=tf.float32))
force_match_row_ids = tf.argmax(force_match_column_indicators, 0,
output_type=tf.int32)
force_match_column_mask = tf.cast(
tf.reduce_max(force_match_column_indicators, 0), tf.bool)
final_matches = tf.where(force_match_column_mask,
force_match_row_ids, matches)
return final_matches
else:
return matches
if similarity_matrix.shape.is_fully_defined():
if shape_utils.get_dim_as_int(similarity_matrix.shape[0]) == 0:
return _match_when_rows_are_empty()
else:
return _match_when_rows_are_non_empty()
else:
return tf.cond(
tf.greater(tf.shape(similarity_matrix)[0], 0),
_match_when_rows_are_non_empty, _match_when_rows_are_empty)
def _set_values_using_indicator(self, x, indicator, val):
"""Set the indicated fields of x to val.
Args:
x: tensor.
indicator: boolean with same shape as x.
val: scalar with value to set.
Returns:
modified tensor.
"""
indicator = tf.cast(indicator, x.dtype)
return tf.add(tf.multiply(x, 1 - indicator), val * indicator)
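def _example_usage():
  # Illustrative sketch (not part of the library API). With
  # matched_threshold=3.0, unmatched_threshold=2.0 and the default
  # negatives_lower_than_unmatched=True, a column whose best similarity is
  # >= 3.0 matches its argmax row, < 2.0 becomes a negative (-1), and a value
  # in between is ignored (-2).
  similarity = tf.constant([[1.0, 3.5, 2.5]], dtype=tf.float32)
  matcher = ArgMaxMatcher(matched_threshold=3., unmatched_threshold=2.)
  match = matcher.match(similarity)
  return match.match_results  # Evaluates to [-1, 0, -2].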
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/matchers/argmax_matcher.py | argmax_matcher.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.matchers.argmax_matcher."""
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.matchers import argmax_matcher
from object_detection.utils import test_case
class ArgMaxMatcherTest(test_case.TestCase):
def test_return_correct_matches_with_default_thresholds(self):
def graph_fn(similarity_matrix):
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=None)
match = matcher.match(similarity_matrix)
matched_cols = match.matched_column_indicator()
unmatched_cols = match.unmatched_column_indicator()
match_results = match.match_results
return (matched_cols, unmatched_cols, match_results)
similarity = np.array([[1., 1, 1, 3, 1],
[2, -1, 2, 0, 4],
[3, 0, -1, 0, 0]], dtype=np.float32)
expected_matched_rows = np.array([2, 0, 1, 0, 1])
(res_matched_cols, res_unmatched_cols,
res_match_results) = self.execute(graph_fn, [similarity])
self.assertAllEqual(res_match_results[res_matched_cols],
expected_matched_rows)
self.assertAllEqual(np.nonzero(res_matched_cols)[0], [0, 1, 2, 3, 4])
self.assertFalse(np.all(res_unmatched_cols))
def test_return_correct_matches_with_empty_rows(self):
def graph_fn(similarity_matrix):
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=None)
match = matcher.match(similarity_matrix)
return match.unmatched_column_indicator()
similarity = 0.2 * np.ones([0, 5], dtype=np.float32)
res_unmatched_cols = self.execute(graph_fn, [similarity])
self.assertAllEqual(np.nonzero(res_unmatched_cols)[0], np.arange(5))
def test_return_correct_matches_with_matched_threshold(self):
def graph_fn(similarity):
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=3.)
match = matcher.match(similarity)
matched_cols = match.matched_column_indicator()
unmatched_cols = match.unmatched_column_indicator()
match_results = match.match_results
return (matched_cols, unmatched_cols, match_results)
similarity = np.array([[1, 1, 1, 3, 1],
[2, -1, 2, 0, 4],
[3, 0, -1, 0, 0]], dtype=np.float32)
expected_matched_cols = np.array([0, 3, 4])
expected_matched_rows = np.array([2, 0, 1])
expected_unmatched_cols = np.array([1, 2])
(res_matched_cols, res_unmatched_cols,
match_results) = self.execute(graph_fn, [similarity])
self.assertAllEqual(match_results[res_matched_cols], expected_matched_rows)
self.assertAllEqual(np.nonzero(res_matched_cols)[0], expected_matched_cols)
self.assertAllEqual(np.nonzero(res_unmatched_cols)[0],
expected_unmatched_cols)
def test_return_correct_matches_with_matched_and_unmatched_threshold(self):
def graph_fn(similarity):
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=3.,
unmatched_threshold=2.)
match = matcher.match(similarity)
matched_cols = match.matched_column_indicator()
unmatched_cols = match.unmatched_column_indicator()
match_results = match.match_results
return (matched_cols, unmatched_cols, match_results)
similarity = np.array([[1, 1, 1, 3, 1],
[2, -1, 2, 0, 4],
[3, 0, -1, 0, 0]], dtype=np.float32)
expected_matched_cols = np.array([0, 3, 4])
expected_matched_rows = np.array([2, 0, 1])
    expected_unmatched_cols = np.array([1])  # col 2 max is between thresholds
(res_matched_cols, res_unmatched_cols,
match_results) = self.execute(graph_fn, [similarity])
self.assertAllEqual(match_results[res_matched_cols], expected_matched_rows)
self.assertAllEqual(np.nonzero(res_matched_cols)[0], expected_matched_cols)
self.assertAllEqual(np.nonzero(res_unmatched_cols)[0],
expected_unmatched_cols)
def test_return_correct_matches_negatives_lower_than_unmatched_false(self):
def graph_fn(similarity):
matcher = argmax_matcher.ArgMaxMatcher(
matched_threshold=3.,
unmatched_threshold=2.,
negatives_lower_than_unmatched=False)
match = matcher.match(similarity)
matched_cols = match.matched_column_indicator()
unmatched_cols = match.unmatched_column_indicator()
match_results = match.match_results
return (matched_cols, unmatched_cols, match_results)
similarity = np.array([[1, 1, 1, 3, 1],
[2, -1, 2, 0, 4],
[3, 0, -1, 0, 0]], dtype=np.float32)
expected_matched_cols = np.array([0, 3, 4])
expected_matched_rows = np.array([2, 0, 1])
    expected_unmatched_cols = np.array([2])  # col 1 max below unmatched thresh
(res_matched_cols, res_unmatched_cols,
match_results) = self.execute(graph_fn, [similarity])
self.assertAllEqual(match_results[res_matched_cols], expected_matched_rows)
self.assertAllEqual(np.nonzero(res_matched_cols)[0], expected_matched_cols)
self.assertAllEqual(np.nonzero(res_unmatched_cols)[0],
expected_unmatched_cols)
def test_return_correct_matches_unmatched_row_not_using_force_match(self):
def graph_fn(similarity):
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=3.,
unmatched_threshold=2.)
match = matcher.match(similarity)
matched_cols = match.matched_column_indicator()
unmatched_cols = match.unmatched_column_indicator()
match_results = match.match_results
return (matched_cols, unmatched_cols, match_results)
similarity = np.array([[1, 1, 1, 3, 1],
[-1, 0, -2, -2, -1],
[3, 0, -1, 2, 0]], dtype=np.float32)
expected_matched_cols = np.array([0, 3])
expected_matched_rows = np.array([2, 0])
expected_unmatched_cols = np.array([1, 2, 4])
(res_matched_cols, res_unmatched_cols,
match_results) = self.execute(graph_fn, [similarity])
self.assertAllEqual(match_results[res_matched_cols], expected_matched_rows)
self.assertAllEqual(np.nonzero(res_matched_cols)[0], expected_matched_cols)
self.assertAllEqual(np.nonzero(res_unmatched_cols)[0],
expected_unmatched_cols)
def test_return_correct_matches_unmatched_row_while_using_force_match(self):
def graph_fn(similarity):
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=3.,
unmatched_threshold=2.,
force_match_for_each_row=True)
match = matcher.match(similarity)
matched_cols = match.matched_column_indicator()
unmatched_cols = match.unmatched_column_indicator()
match_results = match.match_results
return (matched_cols, unmatched_cols, match_results)
similarity = np.array([[1, 1, 1, 3, 1],
[-1, 0, -2, -2, -1],
[3, 0, -1, 2, 0]], dtype=np.float32)
expected_matched_cols = np.array([0, 1, 3])
expected_matched_rows = np.array([2, 1, 0])
    expected_unmatched_cols = np.array([2, 4])  # both max vals below threshold
(res_matched_cols, res_unmatched_cols,
match_results) = self.execute(graph_fn, [similarity])
self.assertAllEqual(match_results[res_matched_cols], expected_matched_rows)
self.assertAllEqual(np.nonzero(res_matched_cols)[0], expected_matched_cols)
self.assertAllEqual(np.nonzero(res_unmatched_cols)[0],
expected_unmatched_cols)
def test_return_correct_matches_using_force_match_padded_groundtruth(self):
def graph_fn(similarity, valid_rows):
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=3.,
unmatched_threshold=2.,
force_match_for_each_row=True)
match = matcher.match(similarity, valid_rows)
matched_cols = match.matched_column_indicator()
unmatched_cols = match.unmatched_column_indicator()
match_results = match.match_results
return (matched_cols, unmatched_cols, match_results)
similarity = np.array([[1, 1, 1, 3, 1],
[-1, 0, -2, -2, -1],
[0, 0, 0, 0, 0],
[3, 0, -1, 2, 0],
[0, 0, 0, 0, 0]], dtype=np.float32)
valid_rows = np.array([True, True, False, True, False])
expected_matched_cols = np.array([0, 1, 3])
expected_matched_rows = np.array([3, 1, 0])
    expected_unmatched_cols = np.array([2, 4])  # both max vals below threshold
(res_matched_cols, res_unmatched_cols,
match_results) = self.execute(graph_fn, [similarity, valid_rows])
self.assertAllEqual(match_results[res_matched_cols], expected_matched_rows)
self.assertAllEqual(np.nonzero(res_matched_cols)[0], expected_matched_cols)
self.assertAllEqual(np.nonzero(res_unmatched_cols)[0],
expected_unmatched_cols)
def test_valid_arguments_corner_case(self):
argmax_matcher.ArgMaxMatcher(matched_threshold=1,
unmatched_threshold=1)
def test_invalid_arguments_corner_case_negatives_lower_than_thres_false(self):
with self.assertRaises(ValueError):
argmax_matcher.ArgMaxMatcher(matched_threshold=1,
unmatched_threshold=1,
negatives_lower_than_unmatched=False)
def test_invalid_arguments_no_matched_threshold(self):
with self.assertRaises(ValueError):
argmax_matcher.ArgMaxMatcher(matched_threshold=None,
unmatched_threshold=4)
def test_invalid_arguments_unmatched_thres_larger_than_matched_thres(self):
with self.assertRaises(ValueError):
argmax_matcher.ArgMaxMatcher(matched_threshold=1,
unmatched_threshold=2)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/matchers/argmax_matcher_test.py | argmax_matcher_test.py |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.core.bipartite_matcher."""
import unittest
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.utils import test_case
from object_detection.utils import tf_version
if tf_version.is_tf2():
from object_detection.matchers import hungarian_matcher # pylint: disable=g-import-not-at-top
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class HungarianBipartiteMatcherTest(test_case.TestCase):
def test_get_expected_matches_when_all_rows_are_valid(self):
similarity_matrix = np.array([[0.50, 0.1, 0.8], [0.15, 0.2, 0.3]],
dtype=np.float32)
    valid_rows = np.ones([2], dtype=bool)
expected_match_results = [-1, 1, 0]
matcher = hungarian_matcher.HungarianBipartiteMatcher()
match_results_out = matcher.match(similarity_matrix, valid_rows=valid_rows)
self.assertAllEqual(match_results_out._match_results.numpy(),
expected_match_results)
def test_get_expected_matches_with_all_rows_be_default(self):
similarity_matrix = np.array([[0.50, 0.1, 0.8], [0.15, 0.2, 0.3]],
dtype=np.float32)
expected_match_results = [-1, 1, 0]
matcher = hungarian_matcher.HungarianBipartiteMatcher()
match_results_out = matcher.match(similarity_matrix)
self.assertAllEqual(match_results_out._match_results.numpy(),
expected_match_results)
def test_get_no_matches_with_zero_valid_rows(self):
similarity_matrix = np.array([[0.50, 0.1, 0.8], [0.15, 0.2, 0.3]],
dtype=np.float32)
    valid_rows = np.zeros([2], dtype=bool)
expected_match_results = [-1, -1, -1]
matcher = hungarian_matcher.HungarianBipartiteMatcher()
match_results_out = matcher.match(similarity_matrix, valid_rows=valid_rows)
self.assertAllEqual(match_results_out._match_results.numpy(),
expected_match_results)
def test_get_expected_matches_with_only_one_valid_row(self):
similarity_matrix = np.array([[0.50, 0.1, 0.8], [0.15, 0.2, 0.3]],
dtype=np.float32)
    valid_rows = np.array([True, False], dtype=bool)
expected_match_results = [-1, -1, 0]
matcher = hungarian_matcher.HungarianBipartiteMatcher()
match_results_out = matcher.match(similarity_matrix, valid_rows=valid_rows)
self.assertAllEqual(match_results_out._match_results.numpy(),
expected_match_results)
def test_get_expected_matches_with_only_one_valid_row_at_bottom(self):
similarity_matrix = np.array([[0.15, 0.2, 0.3], [0.50, 0.1, 0.8]],
dtype=np.float32)
    valid_rows = np.array([False, True], dtype=bool)
expected_match_results = [-1, -1, 0]
matcher = hungarian_matcher.HungarianBipartiteMatcher()
match_results_out = matcher.match(similarity_matrix, valid_rows=valid_rows)
self.assertAllEqual(match_results_out._match_results.numpy(),
expected_match_results)
def test_get_expected_matches_with_two_valid_rows(self):
similarity_matrix = np.array([[0.15, 0.2, 0.3], [0.50, 0.1, 0.8],
[0.84, 0.32, 0.2]],
dtype=np.float32)
    valid_rows = np.array([True, False, True], dtype=bool)
expected_match_results = [1, -1, 0]
matcher = hungarian_matcher.HungarianBipartiteMatcher()
match_results_out = matcher.match(similarity_matrix, valid_rows=valid_rows)
self.assertAllEqual(match_results_out._match_results.numpy(),
expected_match_results)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/matchers/hungarian_matcher_tf2_test.py | hungarian_matcher_tf2_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bipartite matcher implementation."""
import tensorflow.compat.v1 as tf
from tensorflow.contrib.image.python.ops import image_ops
from object_detection.core import matcher
class GreedyBipartiteMatcher(matcher.Matcher):
"""Wraps a Tensorflow greedy bipartite matcher."""
def __init__(self, use_matmul_gather=False):
"""Constructs a Matcher.
Args:
use_matmul_gather: Force constructed match objects to use matrix
multiplication based gather instead of standard tf.gather.
(Default: False).
"""
super(GreedyBipartiteMatcher, self).__init__(
use_matmul_gather=use_matmul_gather)
def _match(self, similarity_matrix, valid_rows):
"""Bipartite matches a collection rows and columns. A greedy bi-partite.
TODO(rathodv): Add num_valid_columns options to match only that many columns
with all the rows.
Args:
similarity_matrix: Float tensor of shape [N, M] with pairwise similarity
where higher values mean more similar.
valid_rows: A boolean tensor of shape [N] indicating the rows that are
valid.
Returns:
match_results: int32 tensor of shape [M] with match_results[i]=-1
meaning that column i is not matched and otherwise that it is matched to
row match_results[i].
"""
valid_row_sim_matrix = tf.gather(similarity_matrix,
tf.squeeze(tf.where(valid_rows), axis=-1))
invalid_row_sim_matrix = tf.gather(
similarity_matrix,
tf.squeeze(tf.where(tf.logical_not(valid_rows)), axis=-1))
similarity_matrix = tf.concat(
[valid_row_sim_matrix, invalid_row_sim_matrix], axis=0)
# Convert similarity matrix to distance matrix as tf.image.bipartite tries
# to find minimum distance matches.
distance_matrix = -1 * similarity_matrix
num_valid_rows = tf.reduce_sum(tf.cast(valid_rows, dtype=tf.float32))
_, match_results = image_ops.bipartite_match(
distance_matrix, num_valid_rows=num_valid_rows)
match_results = tf.reshape(match_results, [-1])
match_results = tf.cast(match_results, tf.int32)
return match_results
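def _example_usage():
  # Illustrative sketch (TF1 only, since tf.contrib is required). Columns are
  # matched greedily to distinct rows via bipartite_match on the negated
  # similarities; columns that receive no row are reported as -1.
  similarity = tf.constant([[0.50, 0.1, 0.8],
                            [0.15, 0.2, 0.3]], dtype=tf.float32)
  valid_rows = tf.constant([True, True])
  matcher = GreedyBipartiteMatcher()
  match = matcher.match(similarity, valid_rows=valid_rows)
  return match.match_results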
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/matchers/bipartite_matcher.py | bipartite_matcher.py |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Hungarian bipartite matcher implementation."""
import numpy as np
from scipy.optimize import linear_sum_assignment
import tensorflow.compat.v1 as tf
from object_detection.core import matcher
class HungarianBipartiteMatcher(matcher.Matcher):
"""Wraps a Hungarian bipartite matcher into TensorFlow."""
def _match(self, similarity_matrix, valid_rows):
"""Optimally bipartite matches a collection rows and columns.
Args:
similarity_matrix: Float tensor of shape [N, M] with pairwise similarity
where higher values mean more similar.
valid_rows: A boolean tensor of shape [N] indicating the rows that are
valid.
Returns:
match_results: int32 tensor of shape [M] with match_results[i]=-1
meaning that column i is not matched and otherwise that it is matched to
row match_results[i].
"""
valid_row_sim_matrix = tf.gather(similarity_matrix,
tf.squeeze(tf.where(valid_rows), axis=-1))
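    # Negate similarities so that scipy's linear_sum_assignment, which
    # minimizes total cost, effectively maximizes total similarity.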
distance_matrix = -1 * valid_row_sim_matrix
def numpy_wrapper(inputs):
def numpy_matching(input_matrix):
row_indices, col_indices = linear_sum_assignment(input_matrix)
match_results = np.full(input_matrix.shape[1], -1)
match_results[col_indices] = row_indices
return match_results.astype(np.int32)
return tf.numpy_function(numpy_matching, inputs, Tout=[tf.int32])
matching_result = tf.autograph.experimental.do_not_convert(
numpy_wrapper)([distance_matrix])
return tf.reshape(matching_result, [-1])
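def _example_usage():
  # Illustrative sketch mirroring the accompanying TF2 test: the optimal
  # assignment pairs column 2 with row 0 and column 1 with row 1, while
  # column 0 is left unmatched (-1).
  similarity = tf.constant([[0.50, 0.1, 0.8],
                            [0.15, 0.2, 0.3]], dtype=tf.float32)
  valid_rows = tf.constant([True, True])
  matcher = HungarianBipartiteMatcher()
  match = matcher.match(similarity, valid_rows=valid_rows)
  return match.match_results  # Evaluates to [-1, 1, 0] in eager mode.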
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/matchers/hungarian_matcher.py | hungarian_matcher.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the OpenImages label expansion (OIDHierarchicalLabelsExpansion)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from object_detection.dataset_tools import oid_hierarchical_labels_expansion
def create_test_data():
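  # Test hierarchy: root a has children b, c and f; c has children d, e and f;
  # f has child d. Expansion adds every non-root ancestor, e.g. a box labelled
  # d also yields boxes labelled f and c.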
hierarchy = {
'LabelName':
'a',
'Subcategory': [{
'LabelName': 'b'
}, {
'LabelName':
'c',
'Subcategory': [{
'LabelName': 'd'
}, {
'LabelName': 'e'
}, {
'LabelName': 'f',
'Subcategory': [{
'LabelName': 'd'
},]
}]
}, {
'LabelName': 'f',
'Subcategory': [{
'LabelName': 'd'
},]
}]
}
bbox_rows = [
'123,xclick,b,1,0.1,0.2,0.1,0.2,1,1,0,0,0',
'123,xclick,d,1,0.2,0.3,0.1,0.2,1,1,0,0,0'
]
label_rows = [
'123,verification,b,0', '123,verification,c,0', '124,verification,d,1'
]
segm_rows = [
'123,cc,b,100,100,0.1,0.2,0.1,0.2,0,MASK',
'123,cc,d,100,100,0.2,0.3,0.1,0.2,0,MASK',
]
return hierarchy, bbox_rows, segm_rows, label_rows
class HierarchicalLabelsExpansionTest(tf.test.TestCase):
def test_bbox_expansion(self):
hierarchy, bbox_rows, _, _ = create_test_data()
expansion_generator = (
oid_hierarchical_labels_expansion.OIDHierarchicalLabelsExpansion(
hierarchy))
all_result_rows = []
for row in bbox_rows:
all_result_rows.extend(
expansion_generator.expand_boxes_or_segments_from_csv(row, 2))
self.assertItemsEqual([
'123,xclick,b,1,0.1,0.2,0.1,0.2,1,1,0,0,0',
'123,xclick,d,1,0.2,0.3,0.1,0.2,1,1,0,0,0',
'123,xclick,f,1,0.2,0.3,0.1,0.2,1,1,0,0,0',
'123,xclick,c,1,0.2,0.3,0.1,0.2,1,1,0,0,0'
], all_result_rows)
def test_segm_expansion(self):
hierarchy, _, segm_rows, _ = create_test_data()
expansion_generator = (
oid_hierarchical_labels_expansion.OIDHierarchicalLabelsExpansion(
hierarchy))
all_result_rows = []
for row in segm_rows:
all_result_rows.extend(
expansion_generator.expand_boxes_or_segments_from_csv(row, 2))
self.assertItemsEqual([
'123,cc,b,100,100,0.1,0.2,0.1,0.2,0,MASK',
'123,cc,d,100,100,0.2,0.3,0.1,0.2,0,MASK',
'123,cc,f,100,100,0.2,0.3,0.1,0.2,0,MASK',
'123,cc,c,100,100,0.2,0.3,0.1,0.2,0,MASK'
], all_result_rows)
def test_labels_expansion(self):
hierarchy, _, _, label_rows = create_test_data()
expansion_generator = (
oid_hierarchical_labels_expansion.OIDHierarchicalLabelsExpansion(
hierarchy))
all_result_rows = []
for row in label_rows:
all_result_rows.extend(
expansion_generator.expand_labels_from_csv(row, 2, 3))
self.assertItemsEqual([
'123,verification,b,0', '123,verification,c,0', '123,verification,d,0',
'123,verification,f,0', '123,verification,e,0', '124,verification,d,1',
'124,verification,f,1', '124,verification,c,1'
], all_result_rows)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/dataset_tools/oid_hierarchical_labels_expansion_test.py | oid_hierarchical_labels_expansion_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Utilities for creating TFRecords of TF examples for the Open Images dataset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import tensorflow.compat.v1 as tf
from object_detection.core import standard_fields
from object_detection.utils import dataset_util
def tf_example_from_annotations_data_frame(annotations_data_frame, label_map,
encoded_image):
"""Populates a TF Example message with image annotations from a data frame.
Args:
annotations_data_frame: Data frame containing the annotations for a single
image.
label_map: String to integer label map.
encoded_image: The encoded image string
Returns:
The populated TF Example, if the label of at least one object is present in
label_map. Otherwise, returns None.
"""
filtered_data_frame = annotations_data_frame[
annotations_data_frame.LabelName.isin(label_map)]
filtered_data_frame_boxes = filtered_data_frame[
~filtered_data_frame.YMin.isnull()]
filtered_data_frame_labels = filtered_data_frame[
filtered_data_frame.YMin.isnull()]
image_id = annotations_data_frame.ImageID.iloc[0]
feature_map = {
standard_fields.TfExampleFields.object_bbox_ymin:
dataset_util.float_list_feature(
filtered_data_frame_boxes.YMin.to_numpy()),
standard_fields.TfExampleFields.object_bbox_xmin:
dataset_util.float_list_feature(
filtered_data_frame_boxes.XMin.to_numpy()),
standard_fields.TfExampleFields.object_bbox_ymax:
dataset_util.float_list_feature(
filtered_data_frame_boxes.YMax.to_numpy()),
standard_fields.TfExampleFields.object_bbox_xmax:
dataset_util.float_list_feature(
filtered_data_frame_boxes.XMax.to_numpy()),
standard_fields.TfExampleFields.object_class_text:
dataset_util.bytes_list_feature([
six.ensure_binary(label_text)
for label_text in filtered_data_frame_boxes.LabelName.to_numpy()
]),
standard_fields.TfExampleFields.object_class_label:
dataset_util.int64_list_feature(
filtered_data_frame_boxes.LabelName.map(
lambda x: label_map[x]).to_numpy()),
standard_fields.TfExampleFields.filename:
dataset_util.bytes_feature(
six.ensure_binary('{}.jpg'.format(image_id))),
standard_fields.TfExampleFields.source_id:
dataset_util.bytes_feature(six.ensure_binary(image_id)),
standard_fields.TfExampleFields.image_encoded:
dataset_util.bytes_feature(six.ensure_binary(encoded_image)),
}
if 'IsGroupOf' in filtered_data_frame.columns:
feature_map[standard_fields.TfExampleFields.
object_group_of] = dataset_util.int64_list_feature(
filtered_data_frame_boxes.IsGroupOf.to_numpy().astype(int))
if 'IsOccluded' in filtered_data_frame.columns:
feature_map[standard_fields.TfExampleFields.
object_occluded] = dataset_util.int64_list_feature(
filtered_data_frame_boxes.IsOccluded.to_numpy().astype(
int))
if 'IsTruncated' in filtered_data_frame.columns:
feature_map[standard_fields.TfExampleFields.
object_truncated] = dataset_util.int64_list_feature(
filtered_data_frame_boxes.IsTruncated.to_numpy().astype(
int))
if 'IsDepiction' in filtered_data_frame.columns:
feature_map[standard_fields.TfExampleFields.
object_depiction] = dataset_util.int64_list_feature(
filtered_data_frame_boxes.IsDepiction.to_numpy().astype(
int))
if 'ConfidenceImageLabel' in filtered_data_frame_labels.columns:
feature_map[standard_fields.TfExampleFields.
image_class_label] = dataset_util.int64_list_feature(
filtered_data_frame_labels.LabelName.map(
lambda x: label_map[x]).to_numpy())
feature_map[standard_fields.TfExampleFields
.image_class_text] = dataset_util.bytes_list_feature([
six.ensure_binary(label_text) for label_text in
filtered_data_frame_labels.LabelName.to_numpy()
                ])
return tf.train.Example(features=tf.train.Features(feature=feature_map))
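def _example_usage():
  # Illustrative sketch; the label id and image bytes below are placeholders.
  # The data frame columns follow the Open Images CSV schema this module
  # expects (ImageID, LabelName, XMin, XMax, YMin, YMax, ...).
  import pandas as pd  # Only needed for this sketch.
  annotations = pd.DataFrame({
      'ImageID': ['img1'],
      'LabelName': ['/m/placeholder'],
      'XMin': [0.1], 'XMax': [0.4],
      'YMin': [0.2], 'YMax': [0.5],
  })
  label_map = {'/m/placeholder': 1}
  return tf_example_from_annotations_data_frame(
      annotations, label_map, encoded_image=b'encoded-jpeg-bytes')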
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/dataset_tools/oid_tfrecord_creation.py | oid_tfrecord_creation.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for create_pascal_tf_record.py."""
import os
import numpy as np
import PIL.Image
import six
import tensorflow.compat.v1 as tf
from object_detection.dataset_tools import create_pascal_tf_record
class CreatePascalTFRecordTest(tf.test.TestCase):
def _assertProtoEqual(self, proto_field, expectation):
"""Helper function to assert if a proto field equals some value.
Args:
proto_field: The protobuf field to compare.
expectation: The expected value of the protobuf field.
"""
proto_list = [p for p in proto_field]
self.assertListEqual(proto_list, expectation)
def test_dict_to_tf_example(self):
image_file_name = 'tmp_image.jpg'
image_data = np.random.rand(256, 256, 3)
save_path = os.path.join(self.get_temp_dir(), image_file_name)
image = PIL.Image.fromarray(image_data, 'RGB')
image.save(save_path)
data = {
'folder': '',
'filename': image_file_name,
'size': {
'height': 256,
'width': 256,
},
'object': [
{
'difficult': 1,
'bndbox': {
'xmin': 64,
'ymin': 64,
'xmax': 192,
'ymax': 192,
},
'name': 'person',
'truncated': 0,
'pose': '',
},
],
}
label_map_dict = {
'background': 0,
'person': 1,
'notperson': 2,
}
example = create_pascal_tf_record.dict_to_tf_example(
data, self.get_temp_dir(), label_map_dict, image_subdirectory='')
self._assertProtoEqual(
example.features.feature['image/height'].int64_list.value, [256])
self._assertProtoEqual(
example.features.feature['image/width'].int64_list.value, [256])
self._assertProtoEqual(
example.features.feature['image/filename'].bytes_list.value,
[six.b(image_file_name)])
self._assertProtoEqual(
example.features.feature['image/source_id'].bytes_list.value,
[six.b(image_file_name)])
self._assertProtoEqual(
example.features.feature['image/format'].bytes_list.value,
[six.b('jpeg')])
self._assertProtoEqual(
example.features.feature['image/object/bbox/xmin'].float_list.value,
[0.25])
self._assertProtoEqual(
example.features.feature['image/object/bbox/ymin'].float_list.value,
[0.25])
self._assertProtoEqual(
example.features.feature['image/object/bbox/xmax'].float_list.value,
[0.75])
self._assertProtoEqual(
example.features.feature['image/object/bbox/ymax'].float_list.value,
[0.75])
self._assertProtoEqual(
example.features.feature['image/object/class/text'].bytes_list.value,
[six.b('person')])
self._assertProtoEqual(
example.features.feature['image/object/class/label'].int64_list.value,
[1])
self._assertProtoEqual(
example.features.feature['image/object/difficult'].int64_list.value,
[1])
self._assertProtoEqual(
example.features.feature['image/object/truncated'].int64_list.value,
[0])
self._assertProtoEqual(
example.features.feature['image/object/view'].bytes_list.value,
[six.b('')])
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/dataset_tools/create_pascal_tf_record_test.py | create_pascal_tf_record_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Convert the Oxford pet dataset to TFRecord for object_detection.
See: O. M. Parkhi, A. Vedaldi, A. Zisserman, C. V. Jawahar
Cats and Dogs
IEEE Conference on Computer Vision and Pattern Recognition, 2012
http://www.robots.ox.ac.uk/~vgg/data/pets/
Example usage:
python object_detection/dataset_tools/create_pet_tf_record.py \
--data_dir=/home/user/pet \
--output_dir=/home/user/pet/output
"""
import hashlib
import io
import logging
import os
import random
import re
import contextlib2
from lxml import etree
import numpy as np
import PIL.Image
import tensorflow.compat.v1 as tf
from object_detection.dataset_tools import tf_record_creation_util
from object_detection.utils import dataset_util
from object_detection.utils import label_map_util
flags = tf.app.flags
flags.DEFINE_string('data_dir', '', 'Root directory to raw pet dataset.')
flags.DEFINE_string('output_dir', '', 'Path to directory to output TFRecords.')
flags.DEFINE_string('label_map_path', 'data/pet_label_map.pbtxt',
'Path to label map proto')
flags.DEFINE_boolean('faces_only', True, 'If True, generates bounding boxes '
'for pet faces. Otherwise generates bounding boxes (as '
'well as segmentations for full pet bodies). Note that '
'in the latter case, the resulting files are much larger.')
flags.DEFINE_string('mask_type', 'png', 'How to represent instance '
'segmentation masks. Options are "png" or "numerical".')
flags.DEFINE_integer('num_shards', 10, 'Number of TFRecord shards')
FLAGS = flags.FLAGS
def get_class_name_from_filename(file_name):
"""Gets the class name from a file.
Args:
file_name: The file name to get the class name from.
ie. "american_pit_bull_terrier_105.jpg"
Returns:
A string of the class name.
"""
match = re.match(r'([A-Za-z_]+)(_[0-9]+\.jpg)', file_name, re.I)
return match.groups()[0]
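# For example, 'american_pit_bull_terrier_105.jpg' yields
# 'american_pit_bull_terrier'.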
def dict_to_tf_example(data,
mask_path,
label_map_dict,
image_subdirectory,
ignore_difficult_instances=False,
faces_only=True,
mask_type='png'):
"""Convert XML derived dict to tf.Example proto.
Notice that this function normalizes the bounding box coordinates provided
by the raw data.
Args:
data: dict holding PASCAL XML fields for a single image (obtained by
running dataset_util.recursive_parse_xml_to_dict)
mask_path: String path to PNG encoded mask.
label_map_dict: A map from string label names to integers ids.
image_subdirectory: String specifying subdirectory within the
Pascal dataset directory holding the actual image data.
ignore_difficult_instances: Whether to skip difficult instances in the
dataset (default: False).
faces_only: If True, generates bounding boxes for pet faces. Otherwise
generates bounding boxes (as well as segmentations for full pet bodies).
mask_type: 'numerical' or 'png'. 'png' is recommended because it leads to
smaller file sizes.
Returns:
example: The converted tf.Example.
Raises:
ValueError: if the image pointed to by data['filename'] is not a valid JPEG
"""
img_path = os.path.join(image_subdirectory, data['filename'])
with tf.gfile.GFile(img_path, 'rb') as fid:
encoded_jpg = fid.read()
encoded_jpg_io = io.BytesIO(encoded_jpg)
image = PIL.Image.open(encoded_jpg_io)
if image.format != 'JPEG':
raise ValueError('Image format not JPEG')
key = hashlib.sha256(encoded_jpg).hexdigest()
with tf.gfile.GFile(mask_path, 'rb') as fid:
encoded_mask_png = fid.read()
encoded_png_io = io.BytesIO(encoded_mask_png)
mask = PIL.Image.open(encoded_png_io)
if mask.format != 'PNG':
raise ValueError('Mask format not PNG')
mask_np = np.asarray(mask)
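  # In the Oxford-IIIT Pet trimaps, pixel value 1 marks the pet, 2 the
  # background and 3 the ambiguous border, so "!= 2" keeps every
  # non-background pixel.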
nonbackground_indices_x = np.any(mask_np != 2, axis=0)
nonbackground_indices_y = np.any(mask_np != 2, axis=1)
nonzero_x_indices = np.where(nonbackground_indices_x)
nonzero_y_indices = np.where(nonbackground_indices_y)
width = int(data['size']['width'])
height = int(data['size']['height'])
xmins = []
ymins = []
xmaxs = []
ymaxs = []
classes = []
classes_text = []
truncated = []
poses = []
difficult_obj = []
masks = []
if 'object' in data:
for obj in data['object']:
difficult = bool(int(obj['difficult']))
if ignore_difficult_instances and difficult:
continue
difficult_obj.append(int(difficult))
if faces_only:
xmin = float(obj['bndbox']['xmin'])
xmax = float(obj['bndbox']['xmax'])
ymin = float(obj['bndbox']['ymin'])
ymax = float(obj['bndbox']['ymax'])
else:
xmin = float(np.min(nonzero_x_indices))
xmax = float(np.max(nonzero_x_indices))
ymin = float(np.min(nonzero_y_indices))
ymax = float(np.max(nonzero_y_indices))
xmins.append(xmin / width)
ymins.append(ymin / height)
xmaxs.append(xmax / width)
ymaxs.append(ymax / height)
class_name = get_class_name_from_filename(data['filename'])
classes_text.append(class_name.encode('utf8'))
classes.append(label_map_dict[class_name])
truncated.append(int(obj['truncated']))
poses.append(obj['pose'].encode('utf8'))
if not faces_only:
mask_remapped = (mask_np != 2).astype(np.uint8)
masks.append(mask_remapped)
feature_dict = {
'image/height': dataset_util.int64_feature(height),
'image/width': dataset_util.int64_feature(width),
'image/filename': dataset_util.bytes_feature(
data['filename'].encode('utf8')),
'image/source_id': dataset_util.bytes_feature(
data['filename'].encode('utf8')),
'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),
'image/encoded': dataset_util.bytes_feature(encoded_jpg),
'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),
'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),
'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),
'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),
'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),
'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
'image/object/class/label': dataset_util.int64_list_feature(classes),
'image/object/difficult': dataset_util.int64_list_feature(difficult_obj),
'image/object/truncated': dataset_util.int64_list_feature(truncated),
'image/object/view': dataset_util.bytes_list_feature(poses),
}
if not faces_only:
if mask_type == 'numerical':
mask_stack = np.stack(masks).astype(np.float32)
masks_flattened = np.reshape(mask_stack, [-1])
feature_dict['image/object/mask'] = (
dataset_util.float_list_feature(masks_flattened.tolist()))
elif mask_type == 'png':
encoded_mask_png_list = []
for mask in masks:
img = PIL.Image.fromarray(mask)
output = io.BytesIO()
img.save(output, format='PNG')
encoded_mask_png_list.append(output.getvalue())
feature_dict['image/object/mask'] = (
dataset_util.bytes_list_feature(encoded_mask_png_list))
example = tf.train.Example(features=tf.train.Features(feature=feature_dict))
return example
def create_tf_record(output_filename,
num_shards,
label_map_dict,
annotations_dir,
image_dir,
examples,
faces_only=True,
mask_type='png'):
"""Creates a TFRecord file from examples.
Args:
output_filename: Path to where output file is saved.
num_shards: Number of shards for output file.
label_map_dict: The label map dictionary.
annotations_dir: Directory where annotation files are stored.
image_dir: Directory where image files are stored.
examples: Examples to parse and save to tf record.
faces_only: If True, generates bounding boxes for pet faces. Otherwise
generates bounding boxes (as well as segmentations for full pet bodies).
mask_type: 'numerical' or 'png'. 'png' is recommended because it leads to
smaller file sizes.
"""
with contextlib2.ExitStack() as tf_record_close_stack:
output_tfrecords = tf_record_creation_util.open_sharded_output_tfrecords(
tf_record_close_stack, output_filename, num_shards)
for idx, example in enumerate(examples):
if idx % 100 == 0:
logging.info('On image %d of %d', idx, len(examples))
xml_path = os.path.join(annotations_dir, 'xmls', example + '.xml')
mask_path = os.path.join(annotations_dir, 'trimaps', example + '.png')
if not os.path.exists(xml_path):
logging.warning('Could not find %s, ignoring example.', xml_path)
continue
with tf.gfile.GFile(xml_path, 'r') as fid:
xml_str = fid.read()
xml = etree.fromstring(xml_str)
data = dataset_util.recursive_parse_xml_to_dict(xml)['annotation']
try:
tf_example = dict_to_tf_example(
data,
mask_path,
label_map_dict,
image_dir,
faces_only=faces_only,
mask_type=mask_type)
if tf_example:
shard_idx = idx % num_shards
output_tfrecords[shard_idx].write(tf_example.SerializeToString())
except ValueError:
logging.warning('Invalid example: %s, ignoring.', xml_path)
# TODO(derekjchow): Add test for pet/PASCAL main files.
def main(_):
data_dir = FLAGS.data_dir
label_map_dict = label_map_util.get_label_map_dict(FLAGS.label_map_path)
logging.info('Reading from Pet dataset.')
image_dir = os.path.join(data_dir, 'images')
annotations_dir = os.path.join(data_dir, 'annotations')
examples_path = os.path.join(annotations_dir, 'trainval.txt')
examples_list = dataset_util.read_examples_list(examples_path)
# Test images are not included in the downloaded data set, so we shall perform
# our own split.
random.seed(42)
random.shuffle(examples_list)
num_examples = len(examples_list)
num_train = int(0.7 * num_examples)
train_examples = examples_list[:num_train]
val_examples = examples_list[num_train:]
logging.info('%d training and %d validation examples.',
len(train_examples), len(val_examples))
train_output_path = os.path.join(FLAGS.output_dir, 'pet_faces_train.record')
val_output_path = os.path.join(FLAGS.output_dir, 'pet_faces_val.record')
if not FLAGS.faces_only:
train_output_path = os.path.join(FLAGS.output_dir,
'pets_fullbody_with_masks_train.record')
val_output_path = os.path.join(FLAGS.output_dir,
'pets_fullbody_with_masks_val.record')
create_tf_record(
train_output_path,
FLAGS.num_shards,
label_map_dict,
annotations_dir,
image_dir,
train_examples,
faces_only=FLAGS.faces_only,
mask_type=FLAGS.mask_type)
create_tf_record(
val_output_path,
FLAGS.num_shards,
label_map_dict,
annotations_dir,
image_dir,
val_examples,
faces_only=FLAGS.faces_only,
mask_type=FLAGS.mask_type)
if __name__ == '__main__':
tf.app.run()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/dataset_tools/create_pet_tf_record.py | create_pet_tf_record.py |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Code to download and parse the AVA Actions dataset for TensorFlow models.
The [AVA Actions data set](
https://research.google.com/ava/index.html)
is a dataset for human action recognition.
This script downloads the annotations and prepares data from similar annotations
if local video files are available. The video files can be downloaded
from the following website:
https://github.com/cvdfoundation/ava-dataset
Prior to running this script, please run download_and_preprocess_ava.sh to
download input videos.
Running this code as a module generates the data set on disk. First, the
required files are downloaded (_download_data) which enables constructing the
label map. Then (in generate_examples), for each split in the data set, the
metadata and image frames are generated from the annotations for each sequence
example (_generate_examples). The data set is written to disk as a set of
numbered TFRecord files.
Generating the data on disk can take considerable time and disk space.
(Image compression quality is the primary determiner of disk usage.)
If using the Tensorflow Object Detection API, set the input_type field
in the input_reader to TF_SEQUENCE_EXAMPLE. If using this script to generate
data for Context R-CNN scripts, the --examples_for_context flag should be
set to true, so that properly-formatted tf.example objects are written to disk.
This data is structured for per-clip action classification where images is
the sequence of images and labels are a one-hot encoded value. See
as_dataset() for more details.
Note that the number of videos changes in the data set over time, so it will
likely be necessary to change the expected number of examples.
The argument video_path_format_string expects a value as such:
'/path/to/videos/{0}'
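For example (the /home/user paths below are placeholders):
    python object_detection/dataset_tools/create_ava_actions_tf_record.py \
        --path_to_download_data=/home/user/ava \
        --path_to_output_dir=/home/user/ava/records \
        --splits_to_process=train,val \
        --video_path_format_string='/home/user/ava/videos/{0}'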
"""
import collections
import contextlib
import csv
import glob
import hashlib
import os
import random
import sys
import zipfile
from absl import app
from absl import flags
from absl import logging
import cv2
from six.moves import range
from six.moves import urllib
import tensorflow.compat.v1 as tf
from object_detection.dataset_tools import seq_example_util
from object_detection.utils import dataset_util
from object_detection.utils import label_map_util
POSSIBLE_TIMESTAMPS = range(902, 1798)
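# Per-second keyframe timestamps (in seconds from the start of each movie) for
# which AVA provides annotations.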
ANNOTATION_URL = 'https://research.google.com/ava/download/ava_v2.2.zip'
SECONDS_TO_MILLI = 1000
FILEPATTERN = 'ava_actions_%s_1fps_rgb'
SPLITS = {
'train': {
'shards': 1000,
'examples': 862663,
'csv': '',
'excluded-csv': ''
},
'val': {
'shards': 100,
'examples': 243029,
'csv': '',
'excluded-csv': ''
},
# Test doesn't have ground truth, so TF Records can't be created
'test': {
'shards': 100,
'examples': 0,
'csv': '',
'excluded-csv': ''
}
}
NUM_CLASSES = 80
def feature_list_feature(value):
return tf.train.FeatureList(feature=value)
class Ava(object):
"""Generates and loads the AVA Actions 2.2 data set."""
def __init__(self, path_to_output_dir, path_to_data_download):
if not path_to_output_dir:
raise ValueError('You must supply the path to the data directory.')
self.path_to_data_download = path_to_data_download
self.path_to_output_dir = path_to_output_dir
def generate_and_write_records(self,
splits_to_process='train,val,test',
video_path_format_string=None,
seconds_per_sequence=10,
hop_between_sequences=10,
examples_for_context=False):
"""Downloads data and generates sharded TFRecords.
Downloads the data files, generates metadata, and processes the metadata
with MediaPipe to produce tf.SequenceExamples for training. The resulting
files can be read with as_dataset(). After running this function the
original data files can be deleted.
Args:
splits_to_process: csv string of which splits to process. Allows
providing a custom CSV with the CSV flag. The original data is still
downloaded to generate the label_map.
video_path_format_string: The format string for the path to local files.
seconds_per_sequence: The length of each sequence, in seconds.
hop_between_sequences: The gap between the centers of
successive sequences.
examples_for_context: Whether to generate sequence examples with context
for context R-CNN.
"""
example_function = self._generate_sequence_examples
if examples_for_context:
example_function = self._generate_examples
logging.info('Downloading data.')
download_output = self._download_data()
for key in splits_to_process.split(','):
logging.info('Generating examples for split: %s', key)
all_metadata = list(example_function(
download_output[0][key][0], download_output[0][key][1],
download_output[1], seconds_per_sequence, hop_between_sequences,
video_path_format_string))
logging.info('An example of the metadata: ')
logging.info(all_metadata[0])
random.seed(47)
random.shuffle(all_metadata)
shards = SPLITS[key]['shards']
shard_names = [os.path.join(
self.path_to_output_dir, FILEPATTERN % key + '-%05d-of-%05d' % (
i, shards)) for i in range(shards)]
writers = [tf.io.TFRecordWriter(shard) for shard in shard_names]
with _close_on_exit(writers) as writers:
for i, seq_ex in enumerate(all_metadata):
writers[i % len(writers)].write(seq_ex.SerializeToString())
logging.info('Data extraction complete.')
def _generate_sequence_examples(self, annotation_file, excluded_file,
label_map, seconds_per_sequence,
hop_between_sequences,
video_path_format_string):
"""For each row in the annotation CSV, generates corresponding examples.
When iterating through frames for a single sequence example, skips over
excluded frames. When moving to the next sequence example, also skips over
excluded frames as if they don't exist. Generates equal-length sequence
examples, each with length seconds_per_sequence (1 fps) and gaps of
    hop_between_sequences frames (and seconds) between them, possibly greater
due to excluded frames.
Args:
annotation_file: path to the file of AVA CSV annotations.
excluded_file: path to a CSV file of excluded timestamps for each video.
label_map: an {int: string} label map.
seconds_per_sequence: The number of seconds per example in each example.
hop_between_sequences: The hop between sequences. If less than
seconds_per_sequence, will overlap.
video_path_format_string: File path format to glob video files.
Yields:
Each prepared tf.SequenceExample of metadata also containing video frames
"""
fieldnames = ['id', 'timestamp_seconds', 'xmin', 'ymin', 'xmax', 'ymax',
'action_label']
frame_excluded = {}
# create a sparse, nested map of videos and frame indices.
with open(excluded_file, 'r') as excluded:
reader = csv.reader(excluded)
for row in reader:
frame_excluded[(row[0], int(float(row[1])))] = True
with open(annotation_file, 'r') as annotations:
reader = csv.DictReader(annotations, fieldnames)
frame_annotations = collections.defaultdict(list)
ids = set()
      # aggregate by video and timestamp:
for row in reader:
ids.add(row['id'])
key = (row['id'], int(float(row['timestamp_seconds'])))
frame_annotations[key].append(row)
      # for each video, find aggregates near each sampled frame:
logging.info('Generating metadata...')
media_num = 1
for media_id in ids:
logging.info('%d/%d, ignore warnings.\n', media_num, len(ids))
media_num += 1
filepath = glob.glob(
video_path_format_string.format(media_id) + '*')[0]
cur_vid = cv2.VideoCapture(filepath)
width = cur_vid.get(cv2.CAP_PROP_FRAME_WIDTH)
height = cur_vid.get(cv2.CAP_PROP_FRAME_HEIGHT)
middle_frame_time = POSSIBLE_TIMESTAMPS[0]
while middle_frame_time < POSSIBLE_TIMESTAMPS[-1]:
start_time = middle_frame_time - seconds_per_sequence // 2 - (
0 if seconds_per_sequence % 2 == 0 else 1)
end_time = middle_frame_time + (seconds_per_sequence // 2)
total_boxes = []
total_labels = []
total_label_strings = []
total_images = []
total_source_ids = []
total_confidences = []
total_is_annotated = []
windowed_timestamp = start_time
while windowed_timestamp < end_time:
if (media_id, windowed_timestamp) in frame_excluded:
end_time += 1
windowed_timestamp += 1
logging.info('Ignoring and skipping excluded frame.')
continue
cur_vid.set(cv2.CAP_PROP_POS_MSEC,
(windowed_timestamp) * SECONDS_TO_MILLI)
_, image = cur_vid.read()
_, buffer = cv2.imencode('.jpg', image)
bufstring = buffer.tostring()
total_images.append(bufstring)
source_id = str(windowed_timestamp) + '_' + media_id
total_source_ids.append(source_id)
total_is_annotated.append(1)
boxes = []
labels = []
label_strings = []
confidences = []
for row in frame_annotations[(media_id, windowed_timestamp)]:
if len(row) > 2 and int(row['action_label']) in label_map:
boxes.append([float(row['ymin']), float(row['xmin']),
float(row['ymax']), float(row['xmax'])])
labels.append(int(row['action_label']))
label_strings.append(label_map[int(row['action_label'])])
confidences.append(1)
else:
logging.warning('Unknown label: %s', row['action_label'])
total_boxes.append(boxes)
total_labels.append(labels)
total_label_strings.append(label_strings)
total_confidences.append(confidences)
windowed_timestamp += 1
if total_boxes:
yield seq_example_util.make_sequence_example(
'AVA', media_id, total_images, int(height), int(width), 'jpeg',
total_source_ids, None, total_is_annotated, total_boxes,
total_label_strings, use_strs_for_source_id=True)
          # Move middle_frame_time, skipping excluded frames.
frames_mv = 0
frames_excluded_count = 0
while (frames_mv < hop_between_sequences + frames_excluded_count
and middle_frame_time + frames_mv < POSSIBLE_TIMESTAMPS[-1]):
frames_mv += 1
if (media_id, windowed_timestamp + frames_mv) in frame_excluded:
frames_excluded_count += 1
middle_frame_time += frames_mv
cur_vid.release()
def _generate_examples(self, annotation_file, excluded_file, label_map,
seconds_per_sequence, hop_between_sequences,
video_path_format_string):
"""For each row in the annotation CSV, generates examples.
When iterating through frames for a single example, skips
over excluded frames. Generates equal-length sequence examples, each with
length seconds_per_sequence (1 fps) and gaps of hop_between_sequences
    frames (and seconds) between them, possibly greater due to excluded frames.
Args:
annotation_file: path to the file of AVA CSV annotations.
excluded_file: path to a CSV file of excluded timestamps for each video.
label_map: an {int: string} label map.
seconds_per_sequence: The number of seconds per example in each example.
hop_between_sequences: The hop between sequences. If less than
seconds_per_sequence, will overlap.
video_path_format_string: File path format to glob video files.
Yields:
Each prepared tf.Example of metadata also containing video frames
"""
del seconds_per_sequence
del hop_between_sequences
fieldnames = ['id', 'timestamp_seconds', 'xmin', 'ymin', 'xmax', 'ymax',
'action_label']
frame_excluded = {}
# create a sparse, nested map of videos and frame indices.
with open(excluded_file, 'r') as excluded:
reader = csv.reader(excluded)
for row in reader:
frame_excluded[(row[0], int(float(row[1])))] = True
with open(annotation_file, 'r') as annotations:
reader = csv.DictReader(annotations, fieldnames)
frame_annotations = collections.defaultdict(list)
ids = set()
      # aggregate by video and timestamp:
for row in reader:
ids.add(row['id'])
key = (row['id'], int(float(row['timestamp_seconds'])))
frame_annotations[key].append(row)
      # for each video, find aggregates near each sampled frame:
logging.info('Generating metadata...')
media_num = 1
for media_id in ids:
logging.info('%d/%d, ignore warnings.\n', media_num, len(ids))
media_num += 1
filepath = glob.glob(
video_path_format_string.format(media_id) + '*')[0]
cur_vid = cv2.VideoCapture(filepath)
width = cur_vid.get(cv2.CAP_PROP_FRAME_WIDTH)
height = cur_vid.get(cv2.CAP_PROP_FRAME_HEIGHT)
middle_frame_time = POSSIBLE_TIMESTAMPS[0]
total_non_excluded = 0
while middle_frame_time < POSSIBLE_TIMESTAMPS[-1]:
if (media_id, middle_frame_time) not in frame_excluded:
total_non_excluded += 1
middle_frame_time += 1
middle_frame_time = POSSIBLE_TIMESTAMPS[0]
cur_frame_num = 0
while middle_frame_time < POSSIBLE_TIMESTAMPS[-1]:
cur_vid.set(cv2.CAP_PROP_POS_MSEC,
middle_frame_time * SECONDS_TO_MILLI)
_, image = cur_vid.read()
_, buffer = cv2.imencode('.jpg', image)
bufstring = buffer.tostring()
if (media_id, middle_frame_time) in frame_excluded:
middle_frame_time += 1
logging.info('Ignoring and skipping excluded frame.')
continue
cur_frame_num += 1
source_id = str(middle_frame_time) + '_' + media_id
xmins = []
xmaxs = []
ymins = []
ymaxs = []
areas = []
labels = []
label_strings = []
confidences = []
for row in frame_annotations[(media_id, middle_frame_time)]:
if len(row) > 2 and int(row['action_label']) in label_map:
xmins.append(float(row['xmin']))
xmaxs.append(float(row['xmax']))
ymins.append(float(row['ymin']))
ymaxs.append(float(row['ymax']))
areas.append(float((xmaxs[-1] - xmins[-1]) *
(ymaxs[-1] - ymins[-1])) / 2)
labels.append(int(row['action_label']))
label_strings.append(label_map[int(row['action_label'])])
confidences.append(1)
else:
logging.warning('Unknown label: %s', row['action_label'])
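          # Advance by 1/3 s (three samples per second), snapping back to an
          # integer timestamp once floating-point error makes it nearly whole.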
middle_frame_time += 1/3
          if abs(middle_frame_time - round(middle_frame_time)) < 0.0001:
middle_frame_time = round(middle_frame_time)
key = hashlib.sha256(bufstring).hexdigest()
date_captured_feature = (
'2020-06-17 00:%02d:%02d' % ((middle_frame_time - 900)*3 // 60,
(middle_frame_time - 900)*3 % 60))
context_feature_dict = {
'image/height':
dataset_util.int64_feature(int(height)),
'image/width':
dataset_util.int64_feature(int(width)),
'image/format':
dataset_util.bytes_feature('jpeg'.encode('utf8')),
'image/source_id':
dataset_util.bytes_feature(source_id.encode('utf8')),
'image/filename':
dataset_util.bytes_feature(source_id.encode('utf8')),
'image/encoded':
dataset_util.bytes_feature(bufstring),
'image/key/sha256':
dataset_util.bytes_feature(key.encode('utf8')),
'image/object/bbox/xmin':
dataset_util.float_list_feature(xmins),
'image/object/bbox/xmax':
dataset_util.float_list_feature(xmaxs),
'image/object/bbox/ymin':
dataset_util.float_list_feature(ymins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(ymaxs),
'image/object/area':
dataset_util.float_list_feature(areas),
'image/object/class/label':
dataset_util.int64_list_feature(labels),
'image/object/class/text':
dataset_util.bytes_list_feature(label_strings),
'image/location':
dataset_util.bytes_feature(media_id.encode('utf8')),
'image/date_captured':
dataset_util.bytes_feature(
date_captured_feature.encode('utf8')),
'image/seq_num_frames':
dataset_util.int64_feature(total_non_excluded),
'image/seq_frame_num':
dataset_util.int64_feature(cur_frame_num),
'image/seq_id':
dataset_util.bytes_feature(media_id.encode('utf8')),
}
yield tf.train.Example(
features=tf.train.Features(feature=context_feature_dict))
cur_vid.release()
def _download_data(self):
"""Downloads and extracts data if not already available."""
    urlretrieve = urllib.request.urlretrieve
logging.info('Creating data directory.')
tf.io.gfile.makedirs(self.path_to_data_download)
logging.info('Downloading annotations.')
paths = {}
zip_path = os.path.join(self.path_to_data_download,
ANNOTATION_URL.split('/')[-1])
urlretrieve(ANNOTATION_URL, zip_path)
with zipfile.ZipFile(zip_path, 'r') as zip_ref:
zip_ref.extractall(self.path_to_data_download)
for split in ['train', 'test', 'val']:
csv_path = os.path.join(self.path_to_data_download,
'ava_%s_v2.2.csv' % split)
excl_name = 'ava_%s_excluded_timestamps_v2.2.csv' % split
excluded_csv_path = os.path.join(self.path_to_data_download, excl_name)
SPLITS[split]['csv'] = csv_path
SPLITS[split]['excluded-csv'] = excluded_csv_path
paths[split] = (csv_path, excluded_csv_path)
label_map = self.get_label_map(os.path.join(
self.path_to_data_download,
'ava_action_list_v2.2_for_activitynet_2019.pbtxt'))
return paths, label_map
def get_label_map(self, path):
"""Parses a label map into {integer:string} format."""
label_map_dict = label_map_util.get_label_map_dict(path)
label_map_dict = {v: bytes(k, 'utf8') for k, v in label_map_dict.items()}
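    # The mapping returned by label_map_util is name -> id; it is inverted
    # here to id -> UTF-8 name bytes, e.g. a hypothetical entry {'walk': 12}
    # becomes {12: b'walk'}.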
logging.info(label_map_dict)
return label_map_dict
@contextlib.contextmanager
def _close_on_exit(writers):
"""Call close on all writers on exit."""
try:
yield writers
finally:
for writer in writers:
writer.close()
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
Ava(flags.FLAGS.path_to_output_dir,
flags.FLAGS.path_to_download_data).generate_and_write_records(
flags.FLAGS.splits_to_process,
flags.FLAGS.video_path_format_string,
flags.FLAGS.seconds_per_sequence,
flags.FLAGS.hop_between_sequences,
flags.FLAGS.examples_for_context)
if __name__ == '__main__':
flags.DEFINE_string('path_to_download_data',
'',
'Path to directory to download data to.')
flags.DEFINE_string('path_to_output_dir',
'',
'Path to directory to write data to.')
flags.DEFINE_string('splits_to_process',
'train,val',
'Process these splits. Useful for custom data splits.')
flags.DEFINE_string('video_path_format_string',
None,
'The format string for the path to local video files. '
'Uses the Python string.format() syntax with possible '
'arguments of {video}, {start}, {end}, {label_name}, and '
'{split}, corresponding to columns of the data csvs.')
flags.DEFINE_integer('seconds_per_sequence',
10,
                       'The number of seconds of video in each sequence '
                       'example. Always 1 when examples_for_context is True.')
flags.DEFINE_integer('hop_between_sequences',
10,
'The hop between sequences. If less than '
'seconds_per_sequence, will overlap. Always 1 when '
'examples_for_context is True.')
flags.DEFINE_boolean('examples_for_context',
False,
'Whether to generate examples instead of sequence '
'examples. If true, will generate tf.Example objects '
'for use in Context R-CNN.')
app.run(main)
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/dataset_tools/create_ava_actions_tf_record.py | create_ava_actions_tf_record.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for oid_tfrecord_creation.py."""
import pandas as pd
import six
import tensorflow.compat.v1 as tf
from object_detection.dataset_tools import oid_tfrecord_creation
def create_test_data():
data = {
'ImageID': ['i1', 'i1', 'i1', 'i1', 'i1', 'i2', 'i2'],
'LabelName': ['a', 'a', 'b', 'b', 'c', 'b', 'c'],
'YMin': [0.3, 0.6, 0.8, 0.1, None, 0.0, 0.0],
'XMin': [0.1, 0.3, 0.7, 0.0, None, 0.1, 0.1],
'XMax': [0.2, 0.3, 0.8, 0.5, None, 0.9, 0.9],
'YMax': [0.3, 0.6, 1, 0.8, None, 0.8, 0.8],
'IsOccluded': [0, 1, 1, 0, None, 0, 0],
'IsTruncated': [0, 0, 0, 1, None, 0, 0],
'IsGroupOf': [0, 0, 0, 0, None, 0, 1],
'IsDepiction': [1, 0, 0, 0, None, 0, 0],
'ConfidenceImageLabel': [None, None, None, None, 0, None, None],
}
df = pd.DataFrame(data=data)
label_map = {'a': 0, 'b': 1, 'c': 2}
return label_map, df
class TfExampleFromAnnotationsDataFrameTests(tf.test.TestCase):
def test_simple(self):
label_map, df = create_test_data()
tf_example = oid_tfrecord_creation.tf_example_from_annotations_data_frame(
df[df.ImageID == 'i1'], label_map, 'encoded_image_test')
self.assertProtoEquals(six.ensure_str("""
features {
feature {
key: "image/encoded"
value { bytes_list { value: "encoded_image_test" } } }
feature {
key: "image/filename"
value { bytes_list { value: "i1.jpg" } } }
feature {
key: "image/object/bbox/ymin"
value { float_list { value: [0.3, 0.6, 0.8, 0.1] } } }
feature {
key: "image/object/bbox/xmin"
value { float_list { value: [0.1, 0.3, 0.7, 0.0] } } }
feature {
key: "image/object/bbox/ymax"
value { float_list { value: [0.3, 0.6, 1.0, 0.8] } } }
feature {
key: "image/object/bbox/xmax"
value { float_list { value: [0.2, 0.3, 0.8, 0.5] } } }
feature {
key: "image/object/class/label"
value { int64_list { value: [0, 0, 1, 1] } } }
feature {
key: "image/object/class/text"
value { bytes_list { value: ["a", "a", "b", "b"] } } }
feature {
key: "image/source_id"
value { bytes_list { value: "i1" } } }
feature {
key: "image/object/depiction"
value { int64_list { value: [1, 0, 0, 0] } } }
feature {
key: "image/object/group_of"
value { int64_list { value: [0, 0, 0, 0] } } }
feature {
key: "image/object/occluded"
value { int64_list { value: [0, 1, 1, 0] } } }
feature {
key: "image/object/truncated"
value { int64_list { value: [0, 0, 0, 1] } } }
feature {
key: "image/class/label"
value { int64_list { value: [2] } } }
feature {
key: "image/class/text"
value { bytes_list { value: ["c"] } } } }
"""), tf_example)
def test_no_attributes(self):
label_map, df = create_test_data()
del df['IsDepiction']
del df['IsGroupOf']
del df['IsOccluded']
del df['IsTruncated']
del df['ConfidenceImageLabel']
tf_example = oid_tfrecord_creation.tf_example_from_annotations_data_frame(
df[df.ImageID == 'i2'], label_map, 'encoded_image_test')
self.assertProtoEquals(six.ensure_str("""
features {
feature {
key: "image/encoded"
value { bytes_list { value: "encoded_image_test" } } }
feature {
key: "image/filename"
value { bytes_list { value: "i2.jpg" } } }
feature {
key: "image/object/bbox/ymin"
value { float_list { value: [0.0, 0.0] } } }
feature {
key: "image/object/bbox/xmin"
value { float_list { value: [0.1, 0.1] } } }
feature {
key: "image/object/bbox/ymax"
value { float_list { value: [0.8, 0.8] } } }
feature {
key: "image/object/bbox/xmax"
value { float_list { value: [0.9, 0.9] } } }
feature {
key: "image/object/class/label"
value { int64_list { value: [1, 2] } } }
feature {
key: "image/object/class/text"
value { bytes_list { value: ["b", "c"] } } }
feature {
key: "image/source_id"
value { bytes_list { value: "i2" } } } }
"""), tf_example)
def test_label_filtering(self):
label_map, df = create_test_data()
label_map = {'a': 0}
tf_example = oid_tfrecord_creation.tf_example_from_annotations_data_frame(
df[df.ImageID == 'i1'], label_map, 'encoded_image_test')
self.assertProtoEquals(
six.ensure_str("""
features {
feature {
key: "image/encoded"
value { bytes_list { value: "encoded_image_test" } } }
feature {
key: "image/filename"
value { bytes_list { value: "i1.jpg" } } }
feature {
key: "image/object/bbox/ymin"
value { float_list { value: [0.3, 0.6] } } }
feature {
key: "image/object/bbox/xmin"
value { float_list { value: [0.1, 0.3] } } }
feature {
key: "image/object/bbox/ymax"
value { float_list { value: [0.3, 0.6] } } }
feature {
key: "image/object/bbox/xmax"
value { float_list { value: [0.2, 0.3] } } }
feature {
key: "image/object/class/label"
value { int64_list { value: [0, 0] } } }
feature {
key: "image/object/class/text"
value { bytes_list { value: ["a", "a"] } } }
feature {
key: "image/source_id"
value { bytes_list { value: "i1" } } }
feature {
key: "image/object/depiction"
value { int64_list { value: [1, 0] } } }
feature {
key: "image/object/group_of"
value { int64_list { value: [0, 0] } } }
feature {
key: "image/object/occluded"
value { int64_list { value: [0, 1] } } }
feature {
key: "image/object/truncated"
value { int64_list { value: [0, 0] } } }
feature {
key: "image/class/label"
value { int64_list { } } }
feature {
key: "image/class/text"
value { bytes_list { } } } }
"""), tf_example)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/dataset_tools/oid_tfrecord_creation_test.py | oid_tfrecord_creation_test.py |
# Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf_record_creation_util.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import contextlib2
import six
from six.moves import range
import tensorflow.compat.v1 as tf
from object_detection.dataset_tools import tf_record_creation_util
class OpenOutputTfrecordsTests(tf.test.TestCase):
def test_sharded_tfrecord_writes(self):
with contextlib2.ExitStack() as tf_record_close_stack:
output_tfrecords = tf_record_creation_util.open_sharded_output_tfrecords(
tf_record_close_stack,
os.path.join(tf.test.get_temp_dir(), 'test.tfrec'), 10)
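      # The helper above opens ten shard files named
      # test.tfrec-00000-of-00010 through test.tfrec-00009-of-00010; the loop
      # below writes one record per shard and the read-back checks the naming.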
for idx in range(10):
output_tfrecords[idx].write(six.ensure_binary('test_{}'.format(idx)))
for idx in range(10):
tf_record_path = '{}-{:05d}-of-00010'.format(
os.path.join(tf.test.get_temp_dir(), 'test.tfrec'), idx)
records = list(tf.python_io.tf_record_iterator(tf_record_path))
self.assertAllEqual(records, ['test_{}'.format(idx).encode('utf-8')])
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/dataset_tools/tf_record_creation_util_test.py | tf_record_creation_util_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Convert raw KITTI detection dataset to TFRecord for object_detection.
Converts the KITTI detection dataset to TFRecords in a standard format that
can be used to train object detectors. The raw dataset can be
downloaded from:
http://kitti.is.tue.mpg.de/kitti/data_object_image_2.zip.
http://kitti.is.tue.mpg.de/kitti/data_object_label_2.zip
Permission can be requested at the main website.
KITTI detection dataset contains 7481 training images. Using this code with
the default settings will set aside the first 500 images as a validation set.
This can be altered using the flags, see details below.
Example usage:
python object_detection/dataset_tools/create_kitti_tf_record.py \
--data_dir=/home/user/kitti \
--output_path=/home/user/kitti.record
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import io
import os
import numpy as np
import PIL.Image as pil
import tensorflow.compat.v1 as tf
from object_detection.utils import dataset_util
from object_detection.utils import label_map_util
from object_detection.utils.np_box_ops import iou
tf.app.flags.DEFINE_string('data_dir', '', 'Location of root directory for the '
                           'data. Folder structure is assumed to be: '
                           '<data_dir>/training/label_2 (annotations) and '
                           '<data_dir>/data_object_image_2/training/image_2 '
                           '(images).')
tf.app.flags.DEFINE_string('output_path', '', 'Path to which TFRecord files '
                           'will be written. The TFRecord with the training '
                           'set will be located at: <output_path>_train.tfrecord '
                           'and the TFRecord with the validation set will be '
                           'located at: <output_path>_val.tfrecord.')
tf.app.flags.DEFINE_string('classes_to_use', 'car,pedestrian,dontcare',
                           'Comma separated list of class names that will be '
                           'used. Adding the dontcare class will remove all '
                           'bboxes in the dontcare regions.')
tf.app.flags.DEFINE_string('label_map_path', 'data/kitti_label_map.pbtxt',
'Path to label map proto.')
tf.app.flags.DEFINE_integer('validation_set_size', 500, 'Number of images to '
                            'be used as a validation set.')
FLAGS = tf.app.flags.FLAGS
def convert_kitti_to_tfrecords(data_dir, output_path, classes_to_use,
label_map_path, validation_set_size):
"""Convert the KITTI detection dataset to TFRecords.
Args:
    data_dir: The full path to the folder containing the unzipped data from
      data_object_image_2.zip and data_object_label_2.zip.
Folder structure is assumed to be: data_dir/training/label_2 (annotations)
and data_dir/data_object_image_2/training/image_2 (images).
output_path: The path to which TFRecord files will be written. The TFRecord
      with the training set will be located at: <output_path>_train.tfrecord,
      and the TFRecord with the validation set will be located at:
<output_path>_val.tfrecord
classes_to_use: List of strings naming the classes for which data should be
      converted. Use the same names as presented in the KITTI README file.
Adding dontcare class will remove all other bounding boxes that overlap
with areas marked as dontcare regions.
label_map_path: Path to label map proto
validation_set_size: How many images should be left as the validation set.
      (First `validation_set_size` examples are selected to be in the
validation set).
"""
label_map_dict = label_map_util.get_label_map_dict(label_map_path)
train_count = 0
val_count = 0
annotation_dir = os.path.join(data_dir,
'training',
'label_2')
image_dir = os.path.join(data_dir,
'data_object_image_2',
'training',
'image_2')
train_writer = tf.python_io.TFRecordWriter('%s_train.tfrecord'%
output_path)
val_writer = tf.python_io.TFRecordWriter('%s_val.tfrecord'%
output_path)
images = sorted(tf.gfile.ListDirectory(image_dir))
for img_name in images:
img_num = int(img_name.split('.')[0])
is_validation_img = img_num < validation_set_size
img_anno = read_annotation_file(os.path.join(annotation_dir,
str(img_num).zfill(6)+'.txt'))
image_path = os.path.join(image_dir, img_name)
# Filter all bounding boxes of this frame that are of a legal class, and
# don't overlap with a dontcare region.
# TODO(talremez) filter out targets that are truncated or heavily occluded.
annotation_for_image = filter_annotations(img_anno, classes_to_use)
example = prepare_example(image_path, annotation_for_image, label_map_dict)
if is_validation_img:
val_writer.write(example.SerializeToString())
val_count += 1
else:
train_writer.write(example.SerializeToString())
train_count += 1
train_writer.close()
val_writer.close()
def prepare_example(image_path, annotations, label_map_dict):
"""Converts a dictionary with annotations for an image to tf.Example proto.
Args:
image_path: The complete path to image.
    annotations: A dictionary representing the annotations of all objects that
      appear in the image, with one array entry per object for each field.
label_map_dict: A map from string label names to integer ids.
Returns:
example: The converted tf.Example.
"""
with tf.gfile.GFile(image_path, 'rb') as fid:
encoded_png = fid.read()
encoded_png_io = io.BytesIO(encoded_png)
image = pil.open(encoded_png_io)
image = np.asarray(image)
key = hashlib.sha256(encoded_png).hexdigest()
width = int(image.shape[1])
height = int(image.shape[0])
xmin_norm = annotations['2d_bbox_left'] / float(width)
ymin_norm = annotations['2d_bbox_top'] / float(height)
xmax_norm = annotations['2d_bbox_right'] / float(width)
ymax_norm = annotations['2d_bbox_bottom'] / float(height)
difficult_obj = [0]*len(xmin_norm)
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': dataset_util.int64_feature(height),
'image/width': dataset_util.int64_feature(width),
'image/filename': dataset_util.bytes_feature(image_path.encode('utf8')),
'image/source_id': dataset_util.bytes_feature(image_path.encode('utf8')),
'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),
'image/encoded': dataset_util.bytes_feature(encoded_png),
'image/format': dataset_util.bytes_feature('png'.encode('utf8')),
'image/object/bbox/xmin': dataset_util.float_list_feature(xmin_norm),
'image/object/bbox/xmax': dataset_util.float_list_feature(xmax_norm),
'image/object/bbox/ymin': dataset_util.float_list_feature(ymin_norm),
'image/object/bbox/ymax': dataset_util.float_list_feature(ymax_norm),
'image/object/class/text': dataset_util.bytes_list_feature(
[x.encode('utf8') for x in annotations['type']]),
'image/object/class/label': dataset_util.int64_list_feature(
[label_map_dict[x] for x in annotations['type']]),
'image/object/difficult': dataset_util.int64_list_feature(difficult_obj),
'image/object/truncated': dataset_util.float_list_feature(
annotations['truncated']),
'image/object/alpha': dataset_util.float_list_feature(
annotations['alpha']),
'image/object/3d_bbox/height': dataset_util.float_list_feature(
annotations['3d_bbox_height']),
'image/object/3d_bbox/width': dataset_util.float_list_feature(
annotations['3d_bbox_width']),
'image/object/3d_bbox/length': dataset_util.float_list_feature(
annotations['3d_bbox_length']),
'image/object/3d_bbox/x': dataset_util.float_list_feature(
annotations['3d_bbox_x']),
'image/object/3d_bbox/y': dataset_util.float_list_feature(
annotations['3d_bbox_y']),
'image/object/3d_bbox/z': dataset_util.float_list_feature(
annotations['3d_bbox_z']),
'image/object/3d_bbox/rot_y': dataset_util.float_list_feature(
annotations['3d_bbox_rot_y']),
}))
return example
def filter_annotations(img_all_annotations, used_classes):
"""Filters out annotations from the unused classes and dontcare regions.
  Filters out the annotations that belong to classes we do not wish to use and
(optionally) also removes all boxes that overlap with dontcare regions.
Args:
img_all_annotations: A list of annotation dictionaries. See documentation of
read_annotation_file for more details about the format of the annotations.
    used_classes: A list of strings listing the classes we want to keep; if the
      list contains "dontcare", all bounding boxes overlapping with dontcare
      regions will also be filtered out.
Returns:
img_filtered_annotations: A list of annotation dictionaries that have passed
the filtering.
"""
img_filtered_annotations = {}
# Filter the type of the objects.
relevant_annotation_indices = [
i for i, x in enumerate(img_all_annotations['type']) if x in used_classes
]
for key in img_all_annotations.keys():
img_filtered_annotations[key] = (
img_all_annotations[key][relevant_annotation_indices])
if 'dontcare' in used_classes:
dont_care_indices = [i for i,
x in enumerate(img_filtered_annotations['type'])
if x == 'dontcare']
# bounding box format [y_min, x_min, y_max, x_max]
all_boxes = np.stack([img_filtered_annotations['2d_bbox_top'],
img_filtered_annotations['2d_bbox_left'],
img_filtered_annotations['2d_bbox_bottom'],
img_filtered_annotations['2d_bbox_right']],
axis=1)
ious = iou(boxes1=all_boxes,
boxes2=all_boxes[dont_care_indices])
# Remove all bounding boxes that overlap with a dontcare region.
if ious.size > 0:
boxes_to_remove = np.amax(ious, axis=1) > 0.0
for key in img_all_annotations.keys():
img_filtered_annotations[key] = (
img_filtered_annotations[key][np.logical_not(boxes_to_remove)])
return img_filtered_annotations
def read_annotation_file(filename):
"""Reads a KITTI annotation file.
Converts a KITTI annotation file into a dictionary containing all the
relevant information.
Args:
    filename: the path to the annotation text file.
Returns:
anno: A dictionary with the converted annotation information. See annotation
README file for details on the different fields.
"""
with open(filename) as f:
content = f.readlines()
content = [x.strip().split(' ') for x in content]
anno = {}
anno['type'] = np.array([x[0].lower() for x in content])
anno['truncated'] = np.array([float(x[1]) for x in content])
anno['occluded'] = np.array([int(x[2]) for x in content])
anno['alpha'] = np.array([float(x[3]) for x in content])
anno['2d_bbox_left'] = np.array([float(x[4]) for x in content])
anno['2d_bbox_top'] = np.array([float(x[5]) for x in content])
anno['2d_bbox_right'] = np.array([float(x[6]) for x in content])
anno['2d_bbox_bottom'] = np.array([float(x[7]) for x in content])
anno['3d_bbox_height'] = np.array([float(x[8]) for x in content])
anno['3d_bbox_width'] = np.array([float(x[9]) for x in content])
anno['3d_bbox_length'] = np.array([float(x[10]) for x in content])
anno['3d_bbox_x'] = np.array([float(x[11]) for x in content])
anno['3d_bbox_y'] = np.array([float(x[12]) for x in content])
anno['3d_bbox_z'] = np.array([float(x[13]) for x in content])
anno['3d_bbox_rot_y'] = np.array([float(x[14]) for x in content])
return anno
def main(_):
convert_kitti_to_tfrecords(
data_dir=FLAGS.data_dir,
output_path=FLAGS.output_path,
classes_to_use=FLAGS.classes_to_use.split(','),
label_map_path=FLAGS.label_map_path,
validation_set_size=FLAGS.validation_set_size)
if __name__ == '__main__':
tf.app.run()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/dataset_tools/create_kitti_tf_record.py | create_kitti_tf_record.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for create_kitti_tf_record.py."""
import os
import numpy as np
import PIL.Image
import six
import tensorflow.compat.v1 as tf
from object_detection.dataset_tools import create_kitti_tf_record
class CreateKittiTFRecordTest(tf.test.TestCase):
def _assertProtoEqual(self, proto_field, expectation):
"""Helper function to assert if a proto field equals some value.
Args:
proto_field: The protobuf field to compare.
expectation: The expected value of the protobuf field.
"""
proto_list = [p for p in proto_field]
self.assertListEqual(proto_list, expectation)
def test_dict_to_tf_example(self):
image_file_name = 'tmp_image.jpg'
image_data = np.random.rand(256, 256, 3)
save_path = os.path.join(self.get_temp_dir(), image_file_name)
image = PIL.Image.fromarray(image_data, 'RGB')
image.save(save_path)
annotations = {}
annotations['2d_bbox_left'] = np.array([64])
annotations['2d_bbox_top'] = np.array([64])
annotations['2d_bbox_right'] = np.array([192])
annotations['2d_bbox_bottom'] = np.array([192])
annotations['type'] = ['car']
annotations['truncated'] = np.array([1])
annotations['alpha'] = np.array([2])
annotations['3d_bbox_height'] = np.array([10])
annotations['3d_bbox_width'] = np.array([11])
annotations['3d_bbox_length'] = np.array([12])
annotations['3d_bbox_x'] = np.array([13])
annotations['3d_bbox_y'] = np.array([14])
annotations['3d_bbox_z'] = np.array([15])
annotations['3d_bbox_rot_y'] = np.array([4])
label_map_dict = {
'background': 0,
'car': 1,
}
example = create_kitti_tf_record.prepare_example(
save_path,
annotations,
label_map_dict)
self._assertProtoEqual(
example.features.feature['image/height'].int64_list.value, [256])
self._assertProtoEqual(
example.features.feature['image/width'].int64_list.value, [256])
self._assertProtoEqual(
example.features.feature['image/filename'].bytes_list.value,
[six.b(save_path)])
self._assertProtoEqual(
example.features.feature['image/source_id'].bytes_list.value,
[six.b(save_path)])
self._assertProtoEqual(
example.features.feature['image/format'].bytes_list.value,
[six.b('png')])
self._assertProtoEqual(
example.features.feature['image/object/bbox/xmin'].float_list.value,
[0.25])
self._assertProtoEqual(
example.features.feature['image/object/bbox/ymin'].float_list.value,
[0.25])
self._assertProtoEqual(
example.features.feature['image/object/bbox/xmax'].float_list.value,
[0.75])
self._assertProtoEqual(
example.features.feature['image/object/bbox/ymax'].float_list.value,
[0.75])
self._assertProtoEqual(
example.features.feature['image/object/class/text'].bytes_list.value,
[six.b('car')])
self._assertProtoEqual(
example.features.feature['image/object/class/label'].int64_list.value,
[1])
self._assertProtoEqual(
example.features.feature['image/object/truncated'].float_list.value,
[1])
self._assertProtoEqual(
example.features.feature['image/object/alpha'].float_list.value,
[2])
self._assertProtoEqual(example.features.feature[
'image/object/3d_bbox/height'].float_list.value, [10])
self._assertProtoEqual(
example.features.feature['image/object/3d_bbox/width'].float_list.value,
[11])
self._assertProtoEqual(example.features.feature[
'image/object/3d_bbox/length'].float_list.value, [12])
self._assertProtoEqual(
example.features.feature['image/object/3d_bbox/x'].float_list.value,
[13])
self._assertProtoEqual(
example.features.feature['image/object/3d_bbox/y'].float_list.value,
[14])
self._assertProtoEqual(
example.features.feature['image/object/3d_bbox/z'].float_list.value,
[15])
self._assertProtoEqual(
example.features.feature['image/object/3d_bbox/rot_y'].float_list.value,
[4])
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/dataset_tools/create_kitti_tf_record_test.py | create_kitti_tf_record_test.py |
# Lint as: python2, python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.seq_example_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
import tensorflow.compat.v1 as tf
from object_detection.dataset_tools import seq_example_util
from object_detection.utils import tf_version
class SeqExampleUtilTest(tf.test.TestCase):
def materialize_tensors(self, list_of_tensors):
if tf_version.is_tf2():
return [tensor.numpy() for tensor in list_of_tensors]
else:
with self.cached_session() as sess:
return sess.run(list_of_tensors)
def test_make_unlabeled_example(self):
num_frames = 5
image_height = 100
image_width = 200
dataset_name = b'unlabeled_dataset'
video_id = b'video_000'
images = tf.cast(tf.random.uniform(
[num_frames, image_height, image_width, 3],
maxval=256,
dtype=tf.int32), dtype=tf.uint8)
image_source_ids = [str(idx) for idx in range(num_frames)]
images_list = tf.unstack(images, axis=0)
encoded_images_list = [tf.io.encode_jpeg(image) for image in images_list]
encoded_images = self.materialize_tensors(encoded_images_list)
seq_example = seq_example_util.make_sequence_example(
dataset_name=dataset_name,
video_id=video_id,
encoded_images=encoded_images,
image_height=image_height,
image_width=image_width,
image_format='JPEG',
image_source_ids=image_source_ids)
context_feature_dict = seq_example.context.feature
self.assertEqual(
dataset_name,
context_feature_dict['example/dataset_name'].bytes_list.value[0])
self.assertEqual(
0,
context_feature_dict['clip/start/timestamp'].int64_list.value[0])
self.assertEqual(
num_frames - 1,
context_feature_dict['clip/end/timestamp'].int64_list.value[0])
self.assertEqual(
num_frames,
context_feature_dict['clip/frames'].int64_list.value[0])
self.assertEqual(
3,
context_feature_dict['image/channels'].int64_list.value[0])
self.assertEqual(
b'JPEG',
context_feature_dict['image/format'].bytes_list.value[0])
self.assertEqual(
image_height,
context_feature_dict['image/height'].int64_list.value[0])
self.assertEqual(
image_width,
context_feature_dict['image/width'].int64_list.value[0])
self.assertEqual(
video_id,
context_feature_dict['clip/media_id'].bytes_list.value[0])
seq_feature_dict = seq_example.feature_lists.feature_list
self.assertLen(
seq_feature_dict['image/encoded'].feature[:],
num_frames)
timestamps = [
feature.int64_list.value[0] for feature
in seq_feature_dict['image/timestamp'].feature]
self.assertAllEqual(list(range(num_frames)), timestamps)
source_ids = [
feature.bytes_list.value[0] for feature
in seq_feature_dict['image/source_id'].feature]
self.assertAllEqual(
[six.ensure_binary(str(idx)) for idx in range(num_frames)],
source_ids)
def test_make_labeled_example(self):
num_frames = 3
image_height = 100
image_width = 200
dataset_name = b'unlabeled_dataset'
video_id = b'video_000'
labels = [b'dog', b'cat', b'wolf']
images = tf.cast(tf.random.uniform(
[num_frames, image_height, image_width, 3],
maxval=256,
dtype=tf.int32), dtype=tf.uint8)
images_list = tf.unstack(images, axis=0)
encoded_images_list = [tf.io.encode_jpeg(image) for image in images_list]
encoded_images = self.materialize_tensors(encoded_images_list)
timestamps = [100000, 110000, 120000]
is_annotated = [1, 0, 1]
bboxes = [
np.array([[0., 0., 0., 0.],
[0., 0., 1., 1.]], dtype=np.float32),
np.zeros([0, 4], dtype=np.float32),
np.array([], dtype=np.float32)
]
label_strings = [
np.array(labels),
np.array([]),
np.array([])
]
seq_example = seq_example_util.make_sequence_example(
dataset_name=dataset_name,
video_id=video_id,
encoded_images=encoded_images,
image_height=image_height,
image_width=image_width,
timestamps=timestamps,
is_annotated=is_annotated,
bboxes=bboxes,
label_strings=label_strings)
context_feature_dict = seq_example.context.feature
self.assertEqual(
dataset_name,
context_feature_dict['example/dataset_name'].bytes_list.value[0])
self.assertEqual(
timestamps[0],
context_feature_dict['clip/start/timestamp'].int64_list.value[0])
self.assertEqual(
timestamps[-1],
context_feature_dict['clip/end/timestamp'].int64_list.value[0])
self.assertEqual(
num_frames,
context_feature_dict['clip/frames'].int64_list.value[0])
seq_feature_dict = seq_example.feature_lists.feature_list
self.assertLen(
seq_feature_dict['image/encoded'].feature[:],
num_frames)
actual_timestamps = [
feature.int64_list.value[0] for feature
in seq_feature_dict['image/timestamp'].feature]
self.assertAllEqual(timestamps, actual_timestamps)
# Frame 0.
self.assertAllEqual(
is_annotated[0],
seq_feature_dict['region/is_annotated'].feature[0].int64_list.value[0])
self.assertAllClose(
[0., 0.],
seq_feature_dict['region/bbox/ymin'].feature[0].float_list.value[:])
self.assertAllClose(
[0., 0.],
seq_feature_dict['region/bbox/xmin'].feature[0].float_list.value[:])
self.assertAllClose(
[0., 1.],
seq_feature_dict['region/bbox/ymax'].feature[0].float_list.value[:])
self.assertAllClose(
[0., 1.],
seq_feature_dict['region/bbox/xmax'].feature[0].float_list.value[:])
self.assertAllEqual(
labels,
seq_feature_dict['region/label/string'].feature[0].bytes_list.value[:])
# Frame 1.
self.assertAllEqual(
is_annotated[1],
seq_feature_dict['region/is_annotated'].feature[1].int64_list.value[0])
self.assertAllClose(
[],
seq_feature_dict['region/bbox/ymin'].feature[1].float_list.value[:])
self.assertAllClose(
[],
seq_feature_dict['region/bbox/xmin'].feature[1].float_list.value[:])
self.assertAllClose(
[],
seq_feature_dict['region/bbox/ymax'].feature[1].float_list.value[:])
self.assertAllClose(
[],
seq_feature_dict['region/bbox/xmax'].feature[1].float_list.value[:])
self.assertAllEqual(
[],
seq_feature_dict['region/label/string'].feature[1].bytes_list.value[:])
def test_make_labeled_example_with_context_features(self):
num_frames = 2
image_height = 100
image_width = 200
dataset_name = b'unlabeled_dataset'
video_id = b'video_000'
labels = [b'dog', b'cat']
images = tf.cast(tf.random.uniform(
[num_frames, image_height, image_width, 3],
maxval=256,
dtype=tf.int32), dtype=tf.uint8)
images_list = tf.unstack(images, axis=0)
encoded_images_list = [tf.io.encode_jpeg(image) for image in images_list]
encoded_images = self.materialize_tensors(encoded_images_list)
timestamps = [100000, 110000]
is_annotated = [1, 0]
bboxes = [
np.array([[0., 0., 0., 0.],
[0., 0., 1., 1.]], dtype=np.float32),
np.zeros([0, 4], dtype=np.float32)
]
label_strings = [
np.array(labels),
np.array([])
]
context_features = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5]
context_feature_length = [3]
context_features_image_id_list = [b'im_1', b'im_2']
seq_example = seq_example_util.make_sequence_example(
dataset_name=dataset_name,
video_id=video_id,
encoded_images=encoded_images,
image_height=image_height,
image_width=image_width,
timestamps=timestamps,
is_annotated=is_annotated,
bboxes=bboxes,
label_strings=label_strings,
context_features=context_features,
context_feature_length=context_feature_length,
context_features_image_id_list=context_features_image_id_list)
context_feature_dict = seq_example.context.feature
self.assertEqual(
dataset_name,
context_feature_dict['example/dataset_name'].bytes_list.value[0])
self.assertEqual(
timestamps[0],
context_feature_dict['clip/start/timestamp'].int64_list.value[0])
self.assertEqual(
timestamps[-1],
context_feature_dict['clip/end/timestamp'].int64_list.value[0])
self.assertEqual(
num_frames,
context_feature_dict['clip/frames'].int64_list.value[0])
self.assertAllClose(
context_features,
context_feature_dict['image/context_features'].float_list.value[:])
self.assertEqual(
context_feature_length[0],
context_feature_dict[
'image/context_feature_length'].int64_list.value[0])
self.assertEqual(
context_features_image_id_list,
context_feature_dict[
'image/context_features_image_id_list'].bytes_list.value[:])
seq_feature_dict = seq_example.feature_lists.feature_list
self.assertLen(
seq_feature_dict['image/encoded'].feature[:],
num_frames)
actual_timestamps = [
feature.int64_list.value[0] for feature
in seq_feature_dict['image/timestamp'].feature]
self.assertAllEqual(timestamps, actual_timestamps)
# Frame 0.
self.assertAllEqual(
is_annotated[0],
seq_feature_dict['region/is_annotated'].feature[0].int64_list.value[0])
self.assertAllClose(
[0., 0.],
seq_feature_dict['region/bbox/ymin'].feature[0].float_list.value[:])
self.assertAllClose(
[0., 0.],
seq_feature_dict['region/bbox/xmin'].feature[0].float_list.value[:])
self.assertAllClose(
[0., 1.],
seq_feature_dict['region/bbox/ymax'].feature[0].float_list.value[:])
self.assertAllClose(
[0., 1.],
seq_feature_dict['region/bbox/xmax'].feature[0].float_list.value[:])
self.assertAllEqual(
labels,
seq_feature_dict['region/label/string'].feature[0].bytes_list.value[:])
# Frame 1.
self.assertAllEqual(
is_annotated[1],
seq_feature_dict['region/is_annotated'].feature[1].int64_list.value[0])
self.assertAllClose(
[],
seq_feature_dict['region/bbox/ymin'].feature[1].float_list.value[:])
self.assertAllClose(
[],
seq_feature_dict['region/bbox/xmin'].feature[1].float_list.value[:])
self.assertAllClose(
[],
seq_feature_dict['region/bbox/ymax'].feature[1].float_list.value[:])
self.assertAllClose(
[],
seq_feature_dict['region/bbox/xmax'].feature[1].float_list.value[:])
self.assertAllEqual(
[],
seq_feature_dict['region/label/string'].feature[1].bytes_list.value[:])
def test_make_labeled_example_with_predictions(self):
num_frames = 2
image_height = 100
image_width = 200
dataset_name = b'unlabeled_dataset'
video_id = b'video_000'
images = tf.cast(tf.random.uniform(
[num_frames, image_height, image_width, 3],
maxval=256,
dtype=tf.int32), dtype=tf.uint8)
images_list = tf.unstack(images, axis=0)
encoded_images_list = [tf.io.encode_jpeg(image) for image in images_list]
encoded_images = self.materialize_tensors(encoded_images_list)
bboxes = [
np.array([[0., 0., 0.75, 0.75],
[0., 0., 1., 1.]], dtype=np.float32),
np.array([[0., 0.25, 0.5, 0.75]], dtype=np.float32)
]
label_strings = [
np.array(['cat', 'frog']),
np.array(['cat'])
]
detection_bboxes = [
np.array([[0., 0., 0.75, 0.75]], dtype=np.float32),
np.zeros([0, 4], dtype=np.float32)
]
detection_classes = [
np.array([5], dtype=np.int64),
np.array([], dtype=np.int64)
]
detection_scores = [
np.array([0.9], dtype=np.float32),
np.array([], dtype=np.float32)
]
seq_example = seq_example_util.make_sequence_example(
dataset_name=dataset_name,
video_id=video_id,
encoded_images=encoded_images,
image_height=image_height,
image_width=image_width,
bboxes=bboxes,
label_strings=label_strings,
detection_bboxes=detection_bboxes,
detection_classes=detection_classes,
detection_scores=detection_scores)
context_feature_dict = seq_example.context.feature
self.assertEqual(
dataset_name,
context_feature_dict['example/dataset_name'].bytes_list.value[0])
self.assertEqual(
0,
context_feature_dict['clip/start/timestamp'].int64_list.value[0])
self.assertEqual(
1,
context_feature_dict['clip/end/timestamp'].int64_list.value[0])
self.assertEqual(
num_frames,
context_feature_dict['clip/frames'].int64_list.value[0])
seq_feature_dict = seq_example.feature_lists.feature_list
self.assertLen(
seq_feature_dict['image/encoded'].feature[:],
num_frames)
actual_timestamps = [
feature.int64_list.value[0] for feature
in seq_feature_dict['image/timestamp'].feature]
self.assertAllEqual([0, 1], actual_timestamps)
# Frame 0.
self.assertAllEqual(
1,
seq_feature_dict['region/is_annotated'].feature[0].int64_list.value[0])
self.assertAllClose(
[0., 0.],
seq_feature_dict['region/bbox/ymin'].feature[0].float_list.value[:])
self.assertAllClose(
[0., 0.],
seq_feature_dict['region/bbox/xmin'].feature[0].float_list.value[:])
self.assertAllClose(
[0.75, 1.],
seq_feature_dict['region/bbox/ymax'].feature[0].float_list.value[:])
self.assertAllClose(
[0.75, 1.],
seq_feature_dict['region/bbox/xmax'].feature[0].float_list.value[:])
self.assertAllEqual(
[b'cat', b'frog'],
seq_feature_dict['region/label/string'].feature[0].bytes_list.value[:])
self.assertAllClose(
[0.],
seq_feature_dict[
'predicted/region/bbox/ymin'].feature[0].float_list.value[:])
self.assertAllClose(
[0.],
seq_feature_dict[
'predicted/region/bbox/xmin'].feature[0].float_list.value[:])
self.assertAllClose(
[0.75],
seq_feature_dict[
'predicted/region/bbox/ymax'].feature[0].float_list.value[:])
self.assertAllClose(
[0.75],
seq_feature_dict[
'predicted/region/bbox/xmax'].feature[0].float_list.value[:])
self.assertAllEqual(
[5],
seq_feature_dict[
'predicted/region/label/index'].feature[0].int64_list.value[:])
self.assertAllClose(
[0.9],
seq_feature_dict[
'predicted/region/label/confidence'].feature[0].float_list.value[:])
# Frame 1.
self.assertAllEqual(
1,
seq_feature_dict['region/is_annotated'].feature[1].int64_list.value[0])
self.assertAllClose(
[0.0],
seq_feature_dict['region/bbox/ymin'].feature[1].float_list.value[:])
self.assertAllClose(
[0.25],
seq_feature_dict['region/bbox/xmin'].feature[1].float_list.value[:])
self.assertAllClose(
[0.5],
seq_feature_dict['region/bbox/ymax'].feature[1].float_list.value[:])
self.assertAllClose(
[0.75],
seq_feature_dict['region/bbox/xmax'].feature[1].float_list.value[:])
self.assertAllEqual(
[b'cat'],
seq_feature_dict['region/label/string'].feature[1].bytes_list.value[:])
self.assertAllClose(
[],
seq_feature_dict[
'predicted/region/bbox/ymin'].feature[1].float_list.value[:])
self.assertAllClose(
[],
seq_feature_dict[
'predicted/region/bbox/xmin'].feature[1].float_list.value[:])
self.assertAllClose(
[],
seq_feature_dict[
'predicted/region/bbox/ymax'].feature[1].float_list.value[:])
self.assertAllClose(
[],
seq_feature_dict[
'predicted/region/bbox/xmax'].feature[1].float_list.value[:])
self.assertAllEqual(
[],
seq_feature_dict[
'predicted/region/label/index'].feature[1].int64_list.value[:])
self.assertAllClose(
[],
seq_feature_dict[
'predicted/region/label/confidence'].feature[1].float_list.value[:])
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/dataset_tools/seq_example_util_test.py | seq_example_util_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Convert raw COCO dataset to TFRecord for object_detection.
This tool supports data generation for object detection (boxes, masks),
keypoint detection, and DensePose.
Please note that this tool creates sharded output files.
Example usage:
python create_coco_tf_record.py --logtostderr \
--train_image_dir="${TRAIN_IMAGE_DIR}" \
--val_image_dir="${VAL_IMAGE_DIR}" \
--test_image_dir="${TEST_IMAGE_DIR}" \
--train_annotations_file="${TRAIN_ANNOTATIONS_FILE}" \
--val_annotations_file="${VAL_ANNOTATIONS_FILE}" \
--testdev_annotations_file="${TESTDEV_ANNOTATIONS_FILE}" \
--output_dir="${OUTPUT_DIR}"
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import io
import json
import logging
import os
import contextlib2
import numpy as np
import PIL.Image
from pycocotools import mask
import tensorflow.compat.v1 as tf
from object_detection.dataset_tools import tf_record_creation_util
from object_detection.utils import dataset_util
from object_detection.utils import label_map_util
flags = tf.app.flags
tf.flags.DEFINE_boolean(
    'include_masks', False, 'Whether to include instance segmentation masks '
'(PNG encoded) in the result. default: False.')
tf.flags.DEFINE_string('train_image_dir', '', 'Training image directory.')
tf.flags.DEFINE_string('val_image_dir', '', 'Validation image directory.')
tf.flags.DEFINE_string('test_image_dir', '', 'Test image directory.')
tf.flags.DEFINE_string('train_annotations_file', '',
'Training annotations JSON file.')
tf.flags.DEFINE_string('val_annotations_file', '',
'Validation annotations JSON file.')
tf.flags.DEFINE_string('testdev_annotations_file', '',
'Test-dev annotations JSON file.')
tf.flags.DEFINE_string('train_keypoint_annotations_file', '',
'Training annotations JSON file.')
tf.flags.DEFINE_string('val_keypoint_annotations_file', '',
'Validation annotations JSON file.')
# DensePose is only available for coco 2014.
tf.flags.DEFINE_string('train_densepose_annotations_file', '',
'Training annotations JSON file for DensePose.')
tf.flags.DEFINE_string('val_densepose_annotations_file', '',
'Validation annotations JSON file for DensePose.')
tf.flags.DEFINE_string('output_dir', '/tmp/', 'Output data directory.')
# Whether to only produce images/annotations on person class (for keypoint /
# densepose task).
tf.flags.DEFINE_boolean('remove_non_person_annotations', False, 'Whether to '
'remove all annotations for non-person objects.')
tf.flags.DEFINE_boolean('remove_non_person_images', False, 'Whether to '
'remove all examples that do not contain a person.')
FLAGS = flags.FLAGS
logger = tf.get_logger()
logger.setLevel(logging.INFO)
_COCO_KEYPOINT_NAMES = [
b'nose', b'left_eye', b'right_eye', b'left_ear', b'right_ear',
b'left_shoulder', b'right_shoulder', b'left_elbow', b'right_elbow',
b'left_wrist', b'right_wrist', b'left_hip', b'right_hip',
b'left_knee', b'right_knee', b'left_ankle', b'right_ankle'
]
_COCO_PART_NAMES = [
b'torso_back', b'torso_front', b'right_hand', b'left_hand', b'left_foot',
b'right_foot', b'right_upper_leg_back', b'left_upper_leg_back',
b'right_upper_leg_front', b'left_upper_leg_front', b'right_lower_leg_back',
b'left_lower_leg_back', b'right_lower_leg_front', b'left_lower_leg_front',
b'left_upper_arm_back', b'right_upper_arm_back', b'left_upper_arm_front',
b'right_upper_arm_front', b'left_lower_arm_back', b'right_lower_arm_back',
b'left_lower_arm_front', b'right_lower_arm_front', b'right_face',
b'left_face',
]
_DP_PART_ID_OFFSET = 1
def clip_to_unit(x):
return min(max(x, 0.0), 1.0)
def create_tf_example(image,
annotations_list,
image_dir,
category_index,
include_masks=False,
keypoint_annotations_dict=None,
densepose_annotations_dict=None,
remove_non_person_annotations=False,
remove_non_person_images=False):
"""Converts image and annotations to a tf.Example proto.
Args:
image: dict with keys: [u'license', u'file_name', u'coco_url', u'height',
u'width', u'date_captured', u'flickr_url', u'id']
annotations_list:
list of dicts with keys: [u'segmentation', u'area', u'iscrowd',
u'image_id', u'bbox', u'category_id', u'id'] Notice that bounding box
coordinates in the official COCO dataset are given as [x, y, width,
height] tuples using absolute coordinates where x, y represent the
top-left (0-indexed) corner. This function converts to the format
      expected by the Tensorflow Object Detection API (which is
[ymin, xmin, ymax, xmax] with coordinates normalized relative to image
size).
image_dir: directory containing the image files.
category_index: a dict containing COCO category information keyed by the
'id' field of each category. See the label_map_util.create_category_index
function.
    include_masks: Whether to include instance segmentation masks
(PNG encoded) in the result. default: False.
keypoint_annotations_dict: A dictionary that maps from annotation_id to a
      dictionary with keys: [u'keypoints', u'num_keypoints'] representing the
keypoint information for this person object annotation. If None, then
no keypoint annotations will be populated.
densepose_annotations_dict: A dictionary that maps from annotation_id to a
dictionary with keys: [u'dp_I', u'dp_x', u'dp_y', 'dp_U', 'dp_V']
representing part surface coordinates. For more information see
http://densepose.org/.
remove_non_person_annotations: Whether to remove any annotations that are
not the "person" class.
remove_non_person_images: Whether to remove any images that do not contain
at least one "person" annotation.
Returns:
key: SHA256 hash of the image.
example: The converted tf.Example
num_annotations_skipped: Number of (invalid) annotations that were ignored.
num_keypoint_annotation_skipped: Number of keypoint annotations that were
skipped.
num_densepose_annotation_skipped: Number of DensePose annotations that were
skipped.
Raises:
ValueError: if the image pointed to by data['filename'] is not a valid JPEG
"""
image_height = image['height']
image_width = image['width']
filename = image['file_name']
image_id = image['id']
full_path = os.path.join(image_dir, filename)
with tf.gfile.GFile(full_path, 'rb') as fid:
encoded_jpg = fid.read()
encoded_jpg_io = io.BytesIO(encoded_jpg)
image = PIL.Image.open(encoded_jpg_io)
key = hashlib.sha256(encoded_jpg).hexdigest()
xmin = []
xmax = []
ymin = []
ymax = []
is_crowd = []
category_names = []
category_ids = []
area = []
encoded_mask_png = []
keypoints_x = []
keypoints_y = []
keypoints_visibility = []
keypoints_name = []
num_keypoints = []
include_keypoint = keypoint_annotations_dict is not None
num_annotations_skipped = 0
num_keypoint_annotation_used = 0
num_keypoint_annotation_skipped = 0
dp_part_index = []
dp_x = []
dp_y = []
dp_u = []
dp_v = []
dp_num_points = []
densepose_keys = ['dp_I', 'dp_U', 'dp_V', 'dp_x', 'dp_y', 'bbox']
include_densepose = densepose_annotations_dict is not None
num_densepose_annotation_used = 0
num_densepose_annotation_skipped = 0
for object_annotations in annotations_list:
(x, y, width, height) = tuple(object_annotations['bbox'])
if width <= 0 or height <= 0:
num_annotations_skipped += 1
continue
if x + width > image_width or y + height > image_height:
num_annotations_skipped += 1
continue
category_id = int(object_annotations['category_id'])
category_name = category_index[category_id]['name'].encode('utf8')
if remove_non_person_annotations and category_name != b'person':
num_annotations_skipped += 1
continue
xmin.append(float(x) / image_width)
xmax.append(float(x + width) / image_width)
ymin.append(float(y) / image_height)
ymax.append(float(y + height) / image_height)
is_crowd.append(object_annotations['iscrowd'])
category_ids.append(category_id)
category_names.append(category_name)
area.append(object_annotations['area'])
if include_masks:
run_len_encoding = mask.frPyObjects(object_annotations['segmentation'],
image_height, image_width)
binary_mask = mask.decode(run_len_encoding)
if not object_annotations['iscrowd']:
binary_mask = np.amax(binary_mask, axis=2)
pil_image = PIL.Image.fromarray(binary_mask)
output_io = io.BytesIO()
pil_image.save(output_io, format='PNG')
encoded_mask_png.append(output_io.getvalue())
if include_keypoint:
annotation_id = object_annotations['id']
if annotation_id in keypoint_annotations_dict:
num_keypoint_annotation_used += 1
keypoint_annotations = keypoint_annotations_dict[annotation_id]
keypoints = keypoint_annotations['keypoints']
num_kpts = keypoint_annotations['num_keypoints']
keypoints_x_abs = keypoints[::3]
keypoints_x.extend(
[float(x_abs) / image_width for x_abs in keypoints_x_abs])
keypoints_y_abs = keypoints[1::3]
keypoints_y.extend(
[float(y_abs) / image_height for y_abs in keypoints_y_abs])
keypoints_visibility.extend(keypoints[2::3])
keypoints_name.extend(_COCO_KEYPOINT_NAMES)
num_keypoints.append(num_kpts)
else:
keypoints_x.extend([0.0] * len(_COCO_KEYPOINT_NAMES))
keypoints_y.extend([0.0] * len(_COCO_KEYPOINT_NAMES))
keypoints_visibility.extend([0] * len(_COCO_KEYPOINT_NAMES))
keypoints_name.extend(_COCO_KEYPOINT_NAMES)
num_keypoints.append(0)
if include_densepose:
annotation_id = object_annotations['id']
if (annotation_id in densepose_annotations_dict and
all(key in densepose_annotations_dict[annotation_id]
for key in densepose_keys)):
dp_annotations = densepose_annotations_dict[annotation_id]
num_densepose_annotation_used += 1
dp_num_points.append(len(dp_annotations['dp_I']))
dp_part_index.extend([int(i - _DP_PART_ID_OFFSET)
for i in dp_annotations['dp_I']])
# DensePose surface coordinates are defined on a [256, 256] grid
# relative to each instance box (i.e. absolute coordinates in range
# [0., 256.]). The following converts the coordinates
# so that they are expressed in normalized image coordinates.
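        # Worked example (illustrative numbers): a dp_x value of 128 in an
        # instance box with x=40 and width=80 inside a 400-pixel-wide image
        # gives 128 / 256 = 0.5 box-relative, hence (40 + 0.5 * 80) / 400 = 0.2
        # in normalized image coordinates.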
dp_x_box_rel = [
clip_to_unit(val / 256.) for val in dp_annotations['dp_x']]
dp_x_norm = [(float(x) + x_box_rel * width) / image_width
for x_box_rel in dp_x_box_rel]
dp_y_box_rel = [
clip_to_unit(val / 256.) for val in dp_annotations['dp_y']]
dp_y_norm = [(float(y) + y_box_rel * height) / image_height
for y_box_rel in dp_y_box_rel]
dp_x.extend(dp_x_norm)
dp_y.extend(dp_y_norm)
dp_u.extend(dp_annotations['dp_U'])
dp_v.extend(dp_annotations['dp_V'])
else:
dp_num_points.append(0)
if (remove_non_person_images and
not any(name == b'person' for name in category_names)):
return (key, None, num_annotations_skipped,
num_keypoint_annotation_skipped, num_densepose_annotation_skipped)
feature_dict = {
'image/height':
dataset_util.int64_feature(image_height),
'image/width':
dataset_util.int64_feature(image_width),
'image/filename':
dataset_util.bytes_feature(filename.encode('utf8')),
'image/source_id':
dataset_util.bytes_feature(str(image_id).encode('utf8')),
'image/key/sha256':
dataset_util.bytes_feature(key.encode('utf8')),
'image/encoded':
dataset_util.bytes_feature(encoded_jpg),
'image/format':
dataset_util.bytes_feature('jpeg'.encode('utf8')),
'image/object/bbox/xmin':
dataset_util.float_list_feature(xmin),
'image/object/bbox/xmax':
dataset_util.float_list_feature(xmax),
'image/object/bbox/ymin':
dataset_util.float_list_feature(ymin),
'image/object/bbox/ymax':
dataset_util.float_list_feature(ymax),
'image/object/class/text':
dataset_util.bytes_list_feature(category_names),
'image/object/is_crowd':
dataset_util.int64_list_feature(is_crowd),
'image/object/area':
dataset_util.float_list_feature(area),
}
if include_masks:
feature_dict['image/object/mask'] = (
dataset_util.bytes_list_feature(encoded_mask_png))
if include_keypoint:
feature_dict['image/object/keypoint/x'] = (
dataset_util.float_list_feature(keypoints_x))
feature_dict['image/object/keypoint/y'] = (
dataset_util.float_list_feature(keypoints_y))
feature_dict['image/object/keypoint/num'] = (
dataset_util.int64_list_feature(num_keypoints))
feature_dict['image/object/keypoint/visibility'] = (
dataset_util.int64_list_feature(keypoints_visibility))
feature_dict['image/object/keypoint/text'] = (
dataset_util.bytes_list_feature(keypoints_name))
num_keypoint_annotation_skipped = (
len(keypoint_annotations_dict) - num_keypoint_annotation_used)
if include_densepose:
feature_dict['image/object/densepose/num'] = (
dataset_util.int64_list_feature(dp_num_points))
feature_dict['image/object/densepose/part_index'] = (
dataset_util.int64_list_feature(dp_part_index))
feature_dict['image/object/densepose/x'] = (
dataset_util.float_list_feature(dp_x))
feature_dict['image/object/densepose/y'] = (
dataset_util.float_list_feature(dp_y))
feature_dict['image/object/densepose/u'] = (
dataset_util.float_list_feature(dp_u))
feature_dict['image/object/densepose/v'] = (
dataset_util.float_list_feature(dp_v))
num_densepose_annotation_skipped = (
len(densepose_annotations_dict) - num_densepose_annotation_used)
example = tf.train.Example(features=tf.train.Features(feature=feature_dict))
return (key, example, num_annotations_skipped,
num_keypoint_annotation_skipped, num_densepose_annotation_skipped)
def _create_tf_record_from_coco_annotations(annotations_file, image_dir,
output_path, include_masks,
num_shards,
keypoint_annotations_file='',
densepose_annotations_file='',
remove_non_person_annotations=False,
remove_non_person_images=False):
"""Loads COCO annotation json files and converts to tf.Record format.
Args:
annotations_file: JSON file containing bounding box annotations.
image_dir: Directory containing the image files.
output_path: Path to output tf.Record file.
    include_masks: Whether to include instance segmentation masks
(PNG encoded) in the result. default: False.
num_shards: number of output file shards.
keypoint_annotations_file: JSON file containing the person keypoint
annotations. If empty, then no person keypoint annotations will be
generated.
densepose_annotations_file: JSON file containing the DensePose annotations.
If empty, then no DensePose annotations will be generated.
remove_non_person_annotations: Whether to remove any annotations that are
not the "person" class.
remove_non_person_images: Whether to remove any images that do not contain
at least one "person" annotation.
"""
with contextlib2.ExitStack() as tf_record_close_stack, \
tf.gfile.GFile(annotations_file, 'r') as fid:
output_tfrecords = tf_record_creation_util.open_sharded_output_tfrecords(
tf_record_close_stack, output_path, num_shards)
groundtruth_data = json.load(fid)
images = groundtruth_data['images']
category_index = label_map_util.create_category_index(
groundtruth_data['categories'])
annotations_index = {}
if 'annotations' in groundtruth_data:
logging.info('Found groundtruth annotations. Building annotations index.')
for annotation in groundtruth_data['annotations']:
image_id = annotation['image_id']
if image_id not in annotations_index:
annotations_index[image_id] = []
annotations_index[image_id].append(annotation)
missing_annotation_count = 0
for image in images:
image_id = image['id']
if image_id not in annotations_index:
missing_annotation_count += 1
annotations_index[image_id] = []
logging.info('%d images are missing annotations.',
missing_annotation_count)
keypoint_annotations_index = {}
if keypoint_annotations_file:
with tf.gfile.GFile(keypoint_annotations_file, 'r') as kid:
keypoint_groundtruth_data = json.load(kid)
if 'annotations' in keypoint_groundtruth_data:
for annotation in keypoint_groundtruth_data['annotations']:
image_id = annotation['image_id']
if image_id not in keypoint_annotations_index:
keypoint_annotations_index[image_id] = {}
keypoint_annotations_index[image_id][annotation['id']] = annotation
densepose_annotations_index = {}
if densepose_annotations_file:
with tf.gfile.GFile(densepose_annotations_file, 'r') as fid:
densepose_groundtruth_data = json.load(fid)
if 'annotations' in densepose_groundtruth_data:
for annotation in densepose_groundtruth_data['annotations']:
image_id = annotation['image_id']
if image_id not in densepose_annotations_index:
densepose_annotations_index[image_id] = {}
densepose_annotations_index[image_id][annotation['id']] = annotation
total_num_annotations_skipped = 0
total_num_keypoint_annotations_skipped = 0
total_num_densepose_annotations_skipped = 0
for idx, image in enumerate(images):
if idx % 100 == 0:
logging.info('On image %d of %d', idx, len(images))
annotations_list = annotations_index[image['id']]
keypoint_annotations_dict = None
if keypoint_annotations_file:
keypoint_annotations_dict = {}
if image['id'] in keypoint_annotations_index:
keypoint_annotations_dict = keypoint_annotations_index[image['id']]
densepose_annotations_dict = None
if densepose_annotations_file:
densepose_annotations_dict = {}
if image['id'] in densepose_annotations_index:
densepose_annotations_dict = densepose_annotations_index[image['id']]
(_, tf_example, num_annotations_skipped, num_keypoint_annotations_skipped,
num_densepose_annotations_skipped) = create_tf_example(
image, annotations_list, image_dir, category_index, include_masks,
keypoint_annotations_dict, densepose_annotations_dict,
remove_non_person_annotations, remove_non_person_images)
total_num_annotations_skipped += num_annotations_skipped
total_num_keypoint_annotations_skipped += num_keypoint_annotations_skipped
total_num_densepose_annotations_skipped += (
num_densepose_annotations_skipped)
shard_idx = idx % num_shards
if tf_example:
output_tfrecords[shard_idx].write(tf_example.SerializeToString())
logging.info('Finished writing, skipped %d annotations.',
total_num_annotations_skipped)
if keypoint_annotations_file:
logging.info('Finished writing, skipped %d keypoint annotations.',
total_num_keypoint_annotations_skipped)
if densepose_annotations_file:
logging.info('Finished writing, skipped %d DensePose annotations.',
total_num_densepose_annotations_skipped)
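# Illustrative sketch (not part of the original script; the helper name is
# hypothetical): the shards written above follow the
# '<output_path>-%05d-of-%05d' naming used by
# tf_record_creation_util.open_sharded_output_tfrecords, so they can be read
# back, for example, with tf.data as below.
def _example_read_sharded_records(output_path, num_shards):
  """Returns a tf.data.TFRecordDataset over all shards of `output_path`."""
  filenames = ['{}-{:05d}-of-{:05d}'.format(output_path, idx, num_shards)
               for idx in range(num_shards)]
  return tf.data.TFRecordDataset(filenames)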
def main(_):
assert FLAGS.train_image_dir, '`train_image_dir` missing.'
assert FLAGS.val_image_dir, '`val_image_dir` missing.'
assert FLAGS.test_image_dir, '`test_image_dir` missing.'
assert FLAGS.train_annotations_file, '`train_annotations_file` missing.'
assert FLAGS.val_annotations_file, '`val_annotations_file` missing.'
assert FLAGS.testdev_annotations_file, '`testdev_annotations_file` missing.'
if not tf.gfile.IsDirectory(FLAGS.output_dir):
tf.gfile.MakeDirs(FLAGS.output_dir)
train_output_path = os.path.join(FLAGS.output_dir, 'coco_train.record')
val_output_path = os.path.join(FLAGS.output_dir, 'coco_val.record')
testdev_output_path = os.path.join(FLAGS.output_dir, 'coco_testdev.record')
_create_tf_record_from_coco_annotations(
FLAGS.train_annotations_file,
FLAGS.train_image_dir,
train_output_path,
FLAGS.include_masks,
num_shards=100,
keypoint_annotations_file=FLAGS.train_keypoint_annotations_file,
densepose_annotations_file=FLAGS.train_densepose_annotations_file,
remove_non_person_annotations=FLAGS.remove_non_person_annotations,
remove_non_person_images=FLAGS.remove_non_person_images)
_create_tf_record_from_coco_annotations(
FLAGS.val_annotations_file,
FLAGS.val_image_dir,
val_output_path,
FLAGS.include_masks,
num_shards=50,
keypoint_annotations_file=FLAGS.val_keypoint_annotations_file,
densepose_annotations_file=FLAGS.val_densepose_annotations_file,
remove_non_person_annotations=FLAGS.remove_non_person_annotations,
remove_non_person_images=FLAGS.remove_non_person_images)
_create_tf_record_from_coco_annotations(
FLAGS.testdev_annotations_file,
FLAGS.test_image_dir,
testdev_output_path,
FLAGS.include_masks,
num_shards=50)
if __name__ == '__main__':
tf.app.run()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/dataset_tools/create_coco_tf_record.py | create_coco_tf_record.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Creates TFRecords of Open Images dataset for object detection.
Example usage:
python object_detection/dataset_tools/create_oid_tf_record.py \
--input_box_annotations_csv=/path/to/input/annotations-human-bbox.csv \
--input_image_label_annotations_csv=/path/to/input/annotations-label.csv \
--input_images_directory=/path/to/input/image_pixels_directory \
--input_label_map=/path/to/input/labels_bbox_545.labelmap \
--output_tf_record_path_prefix=/path/to/output/prefix.tfrecord
CSVs with bounding box annotations and image metadata (including the image URLs)
can be downloaded from the Open Images GitHub repository:
https://github.com/openimages/dataset
This script will include every image found in the input_images_directory in the
output TFRecord, even if the image has no corresponding bounding box annotations
in the input_box_annotations_csv. If input_image_label_annotations_csv is
specified, image-level labels are added as well. Note that the information on
whether a label is positively or negatively verified is NOT added to the
TFRecord.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import contextlib2
import pandas as pd
import tensorflow.compat.v1 as tf
from object_detection.dataset_tools import oid_tfrecord_creation
from object_detection.dataset_tools import tf_record_creation_util
from object_detection.utils import label_map_util
tf.flags.DEFINE_string('input_box_annotations_csv', None,
'Path to CSV containing image bounding box annotations')
tf.flags.DEFINE_string('input_images_directory', None,
'Directory containing the image pixels '
'downloaded from the OpenImages GitHub repository.')
tf.flags.DEFINE_string('input_image_label_annotations_csv', None,
'Path to CSV containing image-level labels annotations')
tf.flags.DEFINE_string('input_label_map', None, 'Path to the label map proto')
tf.flags.DEFINE_string(
'output_tf_record_path_prefix', None,
'Path to the output TFRecord. The shard index and the number of shards '
'will be appended for each output shard.')
tf.flags.DEFINE_integer('num_shards', 100, 'Number of TFRecord shards')
FLAGS = tf.flags.FLAGS
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
required_flags = [
'input_box_annotations_csv', 'input_images_directory', 'input_label_map',
'output_tf_record_path_prefix'
]
for flag_name in required_flags:
if not getattr(FLAGS, flag_name):
raise ValueError('Flag --{} is required'.format(flag_name))
label_map = label_map_util.get_label_map_dict(FLAGS.input_label_map)
all_box_annotations = pd.read_csv(FLAGS.input_box_annotations_csv)
if FLAGS.input_image_label_annotations_csv:
all_label_annotations = pd.read_csv(FLAGS.input_image_label_annotations_csv)
all_label_annotations.rename(
columns={'Confidence': 'ConfidenceImageLabel'}, inplace=True)
else:
all_label_annotations = None
all_images = tf.gfile.Glob(
os.path.join(FLAGS.input_images_directory, '*.jpg'))
all_image_ids = [os.path.splitext(os.path.basename(v))[0] for v in all_images]
all_image_ids = pd.DataFrame({'ImageID': all_image_ids})
all_annotations = pd.concat(
[all_box_annotations, all_image_ids, all_label_annotations])
tf.logging.log(tf.logging.INFO, 'Found %d images...', len(all_image_ids))
with contextlib2.ExitStack() as tf_record_close_stack:
output_tfrecords = tf_record_creation_util.open_sharded_output_tfrecords(
tf_record_close_stack, FLAGS.output_tf_record_path_prefix,
FLAGS.num_shards)
for counter, image_data in enumerate(all_annotations.groupby('ImageID')):
tf.logging.log_every_n(tf.logging.INFO, 'Processed %d images...', 1000,
counter)
image_id, image_annotations = image_data
# In OID image file names are formed by appending ".jpg" to the image ID.
image_path = os.path.join(FLAGS.input_images_directory, image_id + '.jpg')
      # Images must be read as raw bytes (binary mode) before being written
      # into the TFRecord.
      with tf.gfile.Open(image_path, 'rb') as image_file:
encoded_image = image_file.read()
tf_example = oid_tfrecord_creation.tf_example_from_annotations_data_frame(
image_annotations, label_map, encoded_image)
if tf_example:
shard_idx = int(image_id, 16) % FLAGS.num_shards
output_tfrecords[shard_idx].write(tf_example.SerializeToString())
if __name__ == '__main__':
tf.app.run()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/dataset_tools/create_oid_tf_record.py | create_oid_tf_record.py |
# Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Utilities for creating TFRecords of TF examples for the Open Images dataset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
import tensorflow.compat.v1 as tf
def open_sharded_output_tfrecords(exit_stack, base_path, num_shards):
"""Opens all TFRecord shards for writing and adds them to an exit stack.
Args:
    exit_stack: A contextlib2.ExitStack used to automatically close the
      TFRecords opened in this function.
    base_path: The base path for all shards.
    num_shards: The number of shards.
Returns:
The list of opened TFRecords. Position k in the list corresponds to shard k.
"""
tf_record_output_filenames = [
'{}-{:05d}-of-{:05d}'.format(base_path, idx, num_shards)
for idx in range(num_shards)
]
tfrecords = [
exit_stack.enter_context(tf.python_io.TFRecordWriter(file_name))
for file_name in tf_record_output_filenames
]
return tfrecords
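# Illustrative usage sketch (not part of the original module; the helper name
# and record values are assumptions): the writers must be opened inside a
# contextlib2.ExitStack so that every shard is closed when the stack exits.
def _example_write_round_robin(serialized_examples, base_path, num_shards):
  """Writes already-serialized tf.Examples round-robin across shards."""
  import contextlib2  # Assumed available; it is used elsewhere in this package.
  with contextlib2.ExitStack() as exit_stack:
    writers = open_sharded_output_tfrecords(exit_stack, base_path, num_shards)
    for idx, serialized in enumerate(serialized_examples):
      writers[idx % num_shards].write(serialized)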
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/dataset_tools/tf_record_creation_util.py | tf_record_creation_util.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Convert raw PASCAL dataset to TFRecord for object_detection.
Example usage:
python object_detection/dataset_tools/create_pascal_tf_record.py \
--data_dir=/home/user/VOCdevkit \
--year=VOC2012 \
--output_path=/home/user/pascal.record
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import io
import logging
import os
from lxml import etree
import PIL.Image
import tensorflow.compat.v1 as tf
from object_detection.utils import dataset_util
from object_detection.utils import label_map_util
flags = tf.app.flags
flags.DEFINE_string('data_dir', '', 'Root directory to raw PASCAL VOC dataset.')
flags.DEFINE_string('set', 'train', 'Convert training set, validation set or '
'merged set.')
flags.DEFINE_string('annotations_dir', 'Annotations',
'(Relative) path to annotations directory.')
flags.DEFINE_string('year', 'VOC2007', 'Desired challenge year.')
flags.DEFINE_string('output_path', '', 'Path to output TFRecord')
flags.DEFINE_string('label_map_path', 'data/pascal_label_map.pbtxt',
'Path to label map proto')
flags.DEFINE_boolean('ignore_difficult_instances', False, 'Whether to ignore '
'difficult instances')
FLAGS = flags.FLAGS
SETS = ['train', 'val', 'trainval', 'test']
YEARS = ['VOC2007', 'VOC2012', 'merged']
def dict_to_tf_example(data,
dataset_directory,
label_map_dict,
ignore_difficult_instances=False,
image_subdirectory='JPEGImages'):
"""Convert XML derived dict to tf.Example proto.
Notice that this function normalizes the bounding box coordinates provided
by the raw data.
Args:
data: dict holding PASCAL XML fields for a single image (obtained by
running dataset_util.recursive_parse_xml_to_dict)
dataset_directory: Path to root directory holding PASCAL dataset
    label_map_dict: A map from string label names to integer ids.
ignore_difficult_instances: Whether to skip difficult instances in the
dataset (default: False).
image_subdirectory: String specifying subdirectory within the
PASCAL dataset directory holding the actual image data.
Returns:
example: The converted tf.Example.
Raises:
ValueError: if the image pointed to by data['filename'] is not a valid JPEG
"""
img_path = os.path.join(data['folder'], image_subdirectory, data['filename'])
full_path = os.path.join(dataset_directory, img_path)
with tf.gfile.GFile(full_path, 'rb') as fid:
encoded_jpg = fid.read()
encoded_jpg_io = io.BytesIO(encoded_jpg)
image = PIL.Image.open(encoded_jpg_io)
if image.format != 'JPEG':
raise ValueError('Image format not JPEG')
key = hashlib.sha256(encoded_jpg).hexdigest()
width = int(data['size']['width'])
height = int(data['size']['height'])
xmin = []
ymin = []
xmax = []
ymax = []
classes = []
classes_text = []
truncated = []
poses = []
difficult_obj = []
if 'object' in data:
for obj in data['object']:
difficult = bool(int(obj['difficult']))
if ignore_difficult_instances and difficult:
continue
difficult_obj.append(int(difficult))
xmin.append(float(obj['bndbox']['xmin']) / width)
ymin.append(float(obj['bndbox']['ymin']) / height)
xmax.append(float(obj['bndbox']['xmax']) / width)
ymax.append(float(obj['bndbox']['ymax']) / height)
classes_text.append(obj['name'].encode('utf8'))
classes.append(label_map_dict[obj['name']])
truncated.append(int(obj['truncated']))
poses.append(obj['pose'].encode('utf8'))
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': dataset_util.int64_feature(height),
'image/width': dataset_util.int64_feature(width),
'image/filename': dataset_util.bytes_feature(
data['filename'].encode('utf8')),
'image/source_id': dataset_util.bytes_feature(
data['filename'].encode('utf8')),
'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),
'image/encoded': dataset_util.bytes_feature(encoded_jpg),
'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),
'image/object/bbox/xmin': dataset_util.float_list_feature(xmin),
'image/object/bbox/xmax': dataset_util.float_list_feature(xmax),
'image/object/bbox/ymin': dataset_util.float_list_feature(ymin),
'image/object/bbox/ymax': dataset_util.float_list_feature(ymax),
'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
'image/object/class/label': dataset_util.int64_list_feature(classes),
'image/object/difficult': dataset_util.int64_list_feature(difficult_obj),
'image/object/truncated': dataset_util.int64_list_feature(truncated),
'image/object/view': dataset_util.bytes_list_feature(poses),
}))
return example
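# Illustrative sketch (not part of the original script; the XML snippet and
# label map are minimal assumptions, and the referenced JPEG is assumed to
# exist under dataset_directory/VOC2007/JPEGImages/): parses one annotation
# string and converts it with dict_to_tf_example.
def _example_convert_single_annotation(dataset_directory):
  xml_str = b"""<annotation>
    <folder>VOC2007</folder>
    <filename>000001.jpg</filename>
    <size><width>353</width><height>500</height><depth>3</depth></size>
    <object>
      <name>dog</name>
      <pose>Left</pose>
      <truncated>1</truncated>
      <difficult>0</difficult>
      <bndbox><xmin>48</xmin><ymin>240</ymin><xmax>195</xmax><ymax>371</ymax>
      </bndbox>
    </object>
  </annotation>"""
  data = dataset_util.recursive_parse_xml_to_dict(
      etree.fromstring(xml_str))['annotation']
  return dict_to_tf_example(data, dataset_directory, {'dog': 1})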
def main(_):
if FLAGS.set not in SETS:
raise ValueError('set must be in : {}'.format(SETS))
if FLAGS.year not in YEARS:
raise ValueError('year must be in : {}'.format(YEARS))
data_dir = FLAGS.data_dir
years = ['VOC2007', 'VOC2012']
if FLAGS.year != 'merged':
years = [FLAGS.year]
writer = tf.python_io.TFRecordWriter(FLAGS.output_path)
label_map_dict = label_map_util.get_label_map_dict(FLAGS.label_map_path)
for year in years:
logging.info('Reading from PASCAL %s dataset.', year)
examples_path = os.path.join(data_dir, year, 'ImageSets', 'Main',
'aeroplane_' + FLAGS.set + '.txt')
annotations_dir = os.path.join(data_dir, year, FLAGS.annotations_dir)
examples_list = dataset_util.read_examples_list(examples_path)
for idx, example in enumerate(examples_list):
if idx % 100 == 0:
logging.info('On image %d of %d', idx, len(examples_list))
path = os.path.join(annotations_dir, example + '.xml')
with tf.gfile.GFile(path, 'r') as fid:
xml_str = fid.read()
xml = etree.fromstring(xml_str)
data = dataset_util.recursive_parse_xml_to_dict(xml)['annotation']
tf_example = dict_to_tf_example(data, FLAGS.data_dir, label_map_dict,
FLAGS.ignore_difficult_instances)
writer.write(tf_example.SerializeToString())
writer.close()
if __name__ == '__main__':
tf.app.run()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/dataset_tools/create_pascal_tf_record.py | create_pascal_tf_record.py |
# Lint as: python2, python3
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""An executable to expand image-level labels, boxes and segments.
The expansion is performed using class hierarchy, provided in JSON file.
The expected file formats are the following:
- for box and segment files: CSV file is expected to have LabelName field
- for image-level labels: CSV file is expected to have LabelName and Confidence
fields
Note, that LabelName is the only field used for expansion.
Example usage:
python models/research/object_detection/dataset_tools/\
oid_hierarchical_labels_expansion.py \
--json_hierarchy_file=<path to JSON hierarchy> \
--input_annotations=<input csv file> \
--output_annotations=<output csv file> \
--annotation_type=<1 (for boxes and segments) or 2 (for image-level labels)>
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import json
from absl import app
from absl import flags
import six
flags.DEFINE_string(
'json_hierarchy_file', None,
'Path to the file containing label hierarchy in JSON format.')
flags.DEFINE_string(
    'input_annotations', None, 'Path to Open Images annotations file '
    '(either bounding boxes, segments or image-level labels).')
flags.DEFINE_string('output_annotations', None, 'Path to the output file.')
flags.DEFINE_integer(
'annotation_type', None,
    'Type of the input annotations: 1 - boxes or segments, '
    '2 - image-level labels.'
)
FLAGS = flags.FLAGS
def _update_dict(initial_dict, update):
"""Updates dictionary with update content.
Args:
    initial_dict: initial dictionary, modified in place.
    update: dictionary with the content to merge into initial_dict.
"""
for key, value_list in update.items():
if key in initial_dict:
initial_dict[key].update(value_list)
else:
initial_dict[key] = set(value_list)
def _build_plain_hierarchy(hierarchy, skip_root=False):
"""Expands tree hierarchy representation to parent-child dictionary.
Args:
    hierarchy: labels hierarchy as a JSON object.
    skip_root: if True, skips the root node of the hierarchy (used when all
      classes are collected under a virtual root node).
  Returns:
    keyed_parent - dictionary mapping each parent node to all of its children.
    keyed_child - dictionary mapping each child node to all of its parents.
    children - all children of the current node.
"""
all_children = set([])
all_keyed_parent = {}
all_keyed_child = {}
if 'Subcategory' in hierarchy:
for node in hierarchy['Subcategory']:
keyed_parent, keyed_child, children = _build_plain_hierarchy(node)
      # Update is not done through dict.update() since some children have
      # multiple parents in the hierarchy.
_update_dict(all_keyed_parent, keyed_parent)
_update_dict(all_keyed_child, keyed_child)
all_children.update(children)
if not skip_root:
all_keyed_parent[hierarchy['LabelName']] = copy.deepcopy(all_children)
all_children.add(hierarchy['LabelName'])
for child, _ in all_keyed_child.items():
all_keyed_child[child].add(hierarchy['LabelName'])
all_keyed_child[hierarchy['LabelName']] = set([])
return all_keyed_parent, all_keyed_child, all_children
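# Illustrative sketch (not part of the original executable; the function name
# and toy hierarchy are assumptions): shows the shape of the dictionaries
# returned for a tiny two-leaf tree.
def _example_toy_plain_hierarchy():
  toy_hierarchy = {
      'LabelName': 'animal',
      'Subcategory': [{'LabelName': 'cat'}, {'LabelName': 'dog'}],
  }
  keyed_parent, keyed_child, children = _build_plain_hierarchy(toy_hierarchy)
  # keyed_parent == {'cat': set(), 'dog': set(), 'animal': {'cat', 'dog'}}
  # keyed_child  == {'cat': {'animal'}, 'dog': {'animal'}, 'animal': set()}
  # children     == {'cat', 'dog', 'animal'}
  # With skip_root=True the 'animal' root entries would be omitted.
  return keyed_parent, keyed_child, children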
class OIDHierarchicalLabelsExpansion(object):
""" Main class to perform labels hierachical expansion."""
def __init__(self, hierarchy):
"""Constructor.
Args:
hierarchy: labels hierarchy as JSON object.
"""
self._hierarchy_keyed_parent, self._hierarchy_keyed_child, _ = (
_build_plain_hierarchy(hierarchy, skip_root=True))
def expand_boxes_or_segments_from_csv(self, csv_row,
labelname_column_index=1):
"""Expands a row containing bounding boxes/segments from CSV file.
Args:
csv_row: a single row of Open Images released groundtruth file.
labelname_column_index: 0-based index of LabelName column in CSV file.
Returns:
      a list of strings (including the initial row) corresponding to the
      ground truth expanded to multiple annotations for evaluation with
      Open Images Challenge 2018/2019 metrics.
"""
# Row header is expected to be the following for boxes:
# ImageID,LabelName,Confidence,XMin,XMax,YMin,YMax,IsGroupOf
# Row header is expected to be the following for segments:
# ImageID,LabelName,ImageWidth,ImageHeight,XMin,XMax,YMin,YMax,
# IsGroupOf,Mask
split_csv_row = six.ensure_str(csv_row).split(',')
result = [csv_row]
assert split_csv_row[
labelname_column_index] in self._hierarchy_keyed_child
parent_nodes = self._hierarchy_keyed_child[
split_csv_row[labelname_column_index]]
for parent_node in parent_nodes:
split_csv_row[labelname_column_index] = parent_node
result.append(','.join(split_csv_row))
return result
def expand_labels_from_csv(self,
csv_row,
labelname_column_index=1,
confidence_column_index=2):
"""Expands a row containing labels from CSV file.
Args:
csv_row: a single row of Open Images released groundtruth file.
labelname_column_index: 0-based index of LabelName column in CSV file.
confidence_column_index: 0-based index of Confidence column in CSV file.
Returns:
      a list of strings (including the initial row) corresponding to the
      ground truth expanded to multiple annotations for evaluation with
      Open Images Challenge 2018/2019 metrics.
"""
# Row header is expected to be exactly:
# ImageID,Source,LabelName,Confidence
split_csv_row = six.ensure_str(csv_row).split(',')
result = [csv_row]
if int(split_csv_row[confidence_column_index]) == 1:
assert split_csv_row[
labelname_column_index] in self._hierarchy_keyed_child
parent_nodes = self._hierarchy_keyed_child[
split_csv_row[labelname_column_index]]
for parent_node in parent_nodes:
split_csv_row[labelname_column_index] = parent_node
result.append(','.join(split_csv_row))
else:
assert split_csv_row[
labelname_column_index] in self._hierarchy_keyed_parent
child_nodes = self._hierarchy_keyed_parent[
split_csv_row[labelname_column_index]]
for child_node in child_nodes:
split_csv_row[labelname_column_index] = child_node
result.append(','.join(split_csv_row))
return result
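# Illustrative usage sketch (not part of the original executable; the helper
# name, toy hierarchy and CSV row are assumptions): expanding one box row adds
# a copy of the row for every ancestor of its LabelName.
def _example_expand_single_box_row():
  toy_hierarchy = {
      'LabelName': 'root',
      'Subcategory': [{
          'LabelName': 'animal',
          'Subcategory': [{'LabelName': 'cat'}],
      }],
  }
  expander = OIDHierarchicalLabelsExpansion(toy_hierarchy)
  # Columns: ImageID,LabelName,Confidence,XMin,XMax,YMin,YMax,IsGroupOf
  row = 'img1,cat,1,0.1,0.2,0.3,0.4,0'
  # Returns [row, 'img1,animal,1,0.1,0.2,0.3,0.4,0']; the virtual 'root' node
  # is not emitted because the class builds the hierarchy with skip_root=True.
  return expander.expand_boxes_or_segments_from_csv(row)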
def main(unused_args):
del unused_args
with open(FLAGS.json_hierarchy_file) as f:
hierarchy = json.load(f)
expansion_generator = OIDHierarchicalLabelsExpansion(hierarchy)
labels_file = False
if FLAGS.annotation_type == 2:
labels_file = True
elif FLAGS.annotation_type != 1:
print('--annotation_type expected value is 1 or 2.')
return -1
confidence_column_index = -1
labelname_column_index = -1
with open(FLAGS.input_annotations, 'r') as source:
with open(FLAGS.output_annotations, 'w') as target:
header = source.readline()
target.writelines([header])
column_names = header.strip().split(',')
labelname_column_index = column_names.index('LabelName')
if labels_file:
confidence_column_index = column_names.index('Confidence')
for line in source:
if labels_file:
expanded_lines = expansion_generator.expand_labels_from_csv(
line, labelname_column_index, confidence_column_index)
else:
expanded_lines = (
expansion_generator.expand_boxes_or_segments_from_csv(
line, labelname_column_index))
target.writelines(expanded_lines)
if __name__ == '__main__':
flags.mark_flag_as_required('json_hierarchy_file')
flags.mark_flag_as_required('input_annotations')
flags.mark_flag_as_required('output_annotations')
flags.mark_flag_as_required('annotation_type')
app.run(main)
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/dataset_tools/oid_hierarchical_labels_expansion.py | oid_hierarchical_labels_expansion.py |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common utility for object detection tf.train.SequenceExamples."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
def context_float_feature(ndarray):
"""Converts a numpy float array to a context float feature.
Args:
ndarray: A numpy float array.
Returns:
A context float feature.
"""
feature = tf.train.Feature()
for val in ndarray:
feature.float_list.value.append(val)
return feature
def context_int64_feature(ndarray):
"""Converts a numpy array to a context int64 feature.
Args:
ndarray: A numpy int64 array.
Returns:
A context int64 feature.
"""
feature = tf.train.Feature()
for val in ndarray:
feature.int64_list.value.append(val)
return feature
def context_bytes_feature(ndarray):
"""Converts a numpy bytes array to a context bytes feature.
Args:
ndarray: A numpy bytes array.
Returns:
A context bytes feature.
"""
feature = tf.train.Feature()
for val in ndarray:
if isinstance(val, np.ndarray):
val = val.tolist()
feature.bytes_list.value.append(tf.compat.as_bytes(val))
return feature
def sequence_float_feature(ndarray):
"""Converts a numpy float array to a sequence float feature.
Args:
ndarray: A numpy float array.
Returns:
A sequence float feature.
"""
feature_list = tf.train.FeatureList()
for row in ndarray:
feature = feature_list.feature.add()
if row.size:
feature.float_list.value[:] = row
return feature_list
def sequence_int64_feature(ndarray):
"""Converts a numpy int64 array to a sequence int64 feature.
Args:
ndarray: A numpy int64 array.
Returns:
A sequence int64 feature.
"""
feature_list = tf.train.FeatureList()
for row in ndarray:
feature = feature_list.feature.add()
if row.size:
feature.int64_list.value[:] = row
return feature_list
def sequence_bytes_feature(ndarray):
"""Converts a bytes float array to a sequence bytes feature.
Args:
ndarray: A numpy bytes array.
Returns:
A sequence bytes feature.
"""
feature_list = tf.train.FeatureList()
for row in ndarray:
if isinstance(row, np.ndarray):
row = row.tolist()
feature = feature_list.feature.add()
if row:
row = [tf.compat.as_bytes(val) for val in row]
feature.bytes_list.value[:] = row
return feature_list
def sequence_strings_feature(strings):
  """Converts a list of python strings to a sequence bytes feature."""
  new_str_arr = []
for single_str in strings:
new_str_arr.append(tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[single_str.encode('utf8')])))
return tf.train.FeatureList(feature=new_str_arr)
def boxes_to_box_components(bboxes):
"""Converts a list of numpy arrays (boxes) to box components.
Args:
    bboxes: A list with one entry per frame; each entry is a [num_boxes_i, 4]
      numpy array (or an empty list) of boxes in [ymin, xmin, ymax, xmax]
      order.
Returns:
Bounding box component lists.
"""
ymin_list = []
xmin_list = []
ymax_list = []
xmax_list = []
for bbox in bboxes:
if bbox != []: # pylint: disable=g-explicit-bool-comparison
bbox = np.array(bbox).astype(np.float32)
ymin, xmin, ymax, xmax = np.split(bbox, 4, axis=1)
else:
ymin, xmin, ymax, xmax = [], [], [], []
ymin_list.append(np.reshape(ymin, [-1]))
xmin_list.append(np.reshape(xmin, [-1]))
ymax_list.append(np.reshape(ymax, [-1]))
xmax_list.append(np.reshape(xmax, [-1]))
return ymin_list, xmin_list, ymax_list, xmax_list
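# Illustrative sketch (not part of the original module; the helper name is
# hypothetical): two frames, the first with one box and the second with none,
# become per-frame component lists with empty entries for unannotated frames.
def _example_boxes_to_box_components():
  bboxes = [
      [[0.1, 0.2, 0.3, 0.4]],  # Frame 0: one box as [ymin, xmin, ymax, xmax].
      [],                      # Frame 1: no boxes.
  ]
  ymin, xmin, ymax, xmax = boxes_to_box_components(bboxes)
  # ymin[0] == [0.1] and ymin[1] is empty; xmin/ymax/xmax are analogous.
  return ymin, xmin, ymax, xmax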
def make_sequence_example(dataset_name,
video_id,
encoded_images,
image_height,
image_width,
image_format=None,
image_source_ids=None,
timestamps=None,
is_annotated=None,
bboxes=None,
label_strings=None,
detection_bboxes=None,
detection_classes=None,
detection_scores=None,
use_strs_for_source_id=False,
context_features=None,
context_feature_length=None,
context_features_image_id_list=None):
"""Constructs tf.SequenceExamples.
Args:
dataset_name: String with dataset name.
video_id: String with video id.
encoded_images: A [num_frames] list (or numpy array) of encoded image
frames.
image_height: Height of the images.
image_width: Width of the images.
image_format: Format of encoded images.
image_source_ids: (Optional) A [num_frames] list of unique string ids for
each image.
    timestamps: (Optional) A [num_frames] list (or numpy array) of image
      timestamps.
    is_annotated: (Optional) A [num_frames] list (or numpy array) in which
      each element indicates whether the frame has been annotated (1) or
      not (0).
bboxes: (Optional) A list (with num_frames elements) of [num_boxes_i, 4]
numpy float32 arrays holding boxes for each frame.
    label_strings: (Optional) A list (with num_frames elements) of
      [num_boxes_i] numpy string arrays holding object string labels for each
      frame.
    detection_bboxes: (Optional) A list (with num_frames elements) of
      [num_boxes_i, 4] numpy float32 arrays holding prediction boxes for each
      frame.
    detection_classes: (Optional) A list (with num_frames elements) of
      [num_boxes_i] numpy int64 arrays holding predicted classes for each frame.
    detection_scores: (Optional) A list (with num_frames elements) of
      [num_boxes_i] numpy float32 arrays holding predicted object scores for
      each frame.
use_strs_for_source_id: (Optional) Whether to write the source IDs as
strings rather than byte lists of characters.
context_features: (Optional) A list or numpy array of features to use in
Context R-CNN, of length num_context_features * context_feature_length.
context_feature_length: (Optional) The length of each context feature, used
for reshaping.
context_features_image_id_list: (Optional) A list of image ids of length
num_context_features corresponding to the context features.
Returns:
A tf.train.SequenceExample.
"""
num_frames = len(encoded_images)
image_encoded = np.expand_dims(encoded_images, axis=-1)
if timestamps is None:
timestamps = np.arange(num_frames)
image_timestamps = np.expand_dims(timestamps, axis=-1)
# Context fields.
context_dict = {
'example/dataset_name': context_bytes_feature([dataset_name]),
'clip/start/timestamp': context_int64_feature([image_timestamps[0][0]]),
'clip/end/timestamp': context_int64_feature([image_timestamps[-1][0]]),
'clip/frames': context_int64_feature([num_frames]),
'image/channels': context_int64_feature([3]),
'image/height': context_int64_feature([image_height]),
'image/width': context_int64_feature([image_width]),
'clip/media_id': context_bytes_feature([video_id])
}
# Sequence fields.
feature_list = {
'image/encoded': sequence_bytes_feature(image_encoded),
'image/timestamp': sequence_int64_feature(image_timestamps),
}
# Add optional fields.
if image_format is not None:
context_dict['image/format'] = context_bytes_feature([image_format])
if image_source_ids is not None:
if use_strs_for_source_id:
feature_list['image/source_id'] = sequence_strings_feature(
image_source_ids)
else:
feature_list['image/source_id'] = sequence_bytes_feature(image_source_ids)
if bboxes is not None:
bbox_ymin, bbox_xmin, bbox_ymax, bbox_xmax = boxes_to_box_components(bboxes)
feature_list['region/bbox/xmin'] = sequence_float_feature(bbox_xmin)
feature_list['region/bbox/xmax'] = sequence_float_feature(bbox_xmax)
feature_list['region/bbox/ymin'] = sequence_float_feature(bbox_ymin)
feature_list['region/bbox/ymax'] = sequence_float_feature(bbox_ymax)
if is_annotated is None:
is_annotated = np.ones(num_frames, dtype=np.int64)
is_annotated = np.expand_dims(is_annotated, axis=-1)
feature_list['region/is_annotated'] = sequence_int64_feature(is_annotated)
if label_strings is not None:
feature_list['region/label/string'] = sequence_bytes_feature(
label_strings)
if detection_bboxes is not None:
det_bbox_ymin, det_bbox_xmin, det_bbox_ymax, det_bbox_xmax = (
boxes_to_box_components(detection_bboxes))
feature_list['predicted/region/bbox/xmin'] = sequence_float_feature(
det_bbox_xmin)
feature_list['predicted/region/bbox/xmax'] = sequence_float_feature(
det_bbox_xmax)
feature_list['predicted/region/bbox/ymin'] = sequence_float_feature(
det_bbox_ymin)
feature_list['predicted/region/bbox/ymax'] = sequence_float_feature(
det_bbox_ymax)
if detection_classes is not None:
feature_list['predicted/region/label/index'] = sequence_int64_feature(
detection_classes)
if detection_scores is not None:
feature_list['predicted/region/label/confidence'] = sequence_float_feature(
detection_scores)
if context_features is not None:
context_dict['image/context_features'] = context_float_feature(
context_features)
if context_feature_length is not None:
context_dict['image/context_feature_length'] = context_int64_feature(
context_feature_length)
if context_features_image_id_list is not None:
context_dict['image/context_features_image_id_list'] = (
context_bytes_feature(context_features_image_id_list))
context = tf.train.Features(feature=context_dict)
feature_lists = tf.train.FeatureLists(feature_list=feature_list)
sequence_example = tf.train.SequenceExample(
context=context,
feature_lists=feature_lists)
return sequence_example
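# Illustrative usage sketch (not part of the original module; all literal
# values are placeholders): builds a two-frame SequenceExample with one
# groundtruth box in the first frame and none in the second.
def _example_make_sequence_example():
  encoded_images = [b'fake_jpeg_bytes_0', b'fake_jpeg_bytes_1']
  # Boxes are [ymin, xmin, ymax, xmax]; plain lists are converted internally.
  bboxes = [[[0.0, 0.1, 0.5, 0.6]], []]
  label_strings = [['cat'], []]
  return make_sequence_example(
      dataset_name='toy_dataset',
      video_id='video_0',
      encoded_images=encoded_images,
      image_height=256,
      image_width=256,
      image_format='JPEG',
      bboxes=bboxes,
      label_strings=label_strings)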
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/dataset_tools/seq_example_util.py | seq_example_util.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for create_coco_tf_record.py."""
import io
import json
import os
import numpy as np
import PIL.Image
import six
import tensorflow.compat.v1 as tf
from object_detection.dataset_tools import create_coco_tf_record
class CreateCocoTFRecordTest(tf.test.TestCase):
def _assertProtoEqual(self, proto_field, expectation):
"""Helper function to assert if a proto field equals some value.
Args:
proto_field: The protobuf field to compare.
expectation: The expected value of the protobuf field.
"""
proto_list = [p for p in proto_field]
self.assertListEqual(proto_list, expectation)
def _assertProtoClose(self, proto_field, expectation):
"""Helper function to assert if a proto field nearly equals some value.
Args:
proto_field: The protobuf field to compare.
expectation: The expected value of the protobuf field.
"""
proto_list = [p for p in proto_field]
self.assertAllClose(proto_list, expectation)
def test_create_tf_example(self):
image_file_name = 'tmp_image.jpg'
image_data = np.random.rand(256, 256, 3)
tmp_dir = self.get_temp_dir()
save_path = os.path.join(tmp_dir, image_file_name)
image = PIL.Image.fromarray(image_data, 'RGB')
image.save(save_path)
image = {
'file_name': image_file_name,
'height': 256,
'width': 256,
'id': 11,
}
annotations_list = [{
'area': .5,
'iscrowd': False,
'image_id': 11,
'bbox': [64, 64, 128, 128],
'category_id': 2,
'id': 1000,
}]
image_dir = tmp_dir
category_index = {
1: {
'name': 'dog',
'id': 1
},
2: {
'name': 'cat',
'id': 2
},
3: {
'name': 'human',
'id': 3
}
}
(_, example,
num_annotations_skipped, _, _) = create_coco_tf_record.create_tf_example(
image, annotations_list, image_dir, category_index)
self.assertEqual(num_annotations_skipped, 0)
self._assertProtoEqual(
example.features.feature['image/height'].int64_list.value, [256])
self._assertProtoEqual(
example.features.feature['image/width'].int64_list.value, [256])
self._assertProtoEqual(
example.features.feature['image/filename'].bytes_list.value,
[six.b(image_file_name)])
self._assertProtoEqual(
example.features.feature['image/source_id'].bytes_list.value,
[six.b(str(image['id']))])
self._assertProtoEqual(
example.features.feature['image/format'].bytes_list.value,
[six.b('jpeg')])
self._assertProtoEqual(
example.features.feature['image/object/bbox/xmin'].float_list.value,
[0.25])
self._assertProtoEqual(
example.features.feature['image/object/bbox/ymin'].float_list.value,
[0.25])
self._assertProtoEqual(
example.features.feature['image/object/bbox/xmax'].float_list.value,
[0.75])
self._assertProtoEqual(
example.features.feature['image/object/bbox/ymax'].float_list.value,
[0.75])
self._assertProtoEqual(
example.features.feature['image/object/class/text'].bytes_list.value,
[six.b('cat')])
def test_create_tf_example_with_instance_masks(self):
image_file_name = 'tmp_image.jpg'
image_data = np.random.rand(8, 8, 3)
tmp_dir = self.get_temp_dir()
save_path = os.path.join(tmp_dir, image_file_name)
image = PIL.Image.fromarray(image_data, 'RGB')
image.save(save_path)
image = {
'file_name': image_file_name,
'height': 8,
'width': 8,
'id': 11,
}
annotations_list = [{
'area': .5,
'iscrowd': False,
'image_id': 11,
'bbox': [0, 0, 8, 8],
'segmentation': [[4, 0, 0, 0, 0, 4], [8, 4, 4, 8, 8, 8]],
'category_id': 1,
'id': 1000,
}]
image_dir = tmp_dir
category_index = {
1: {
'name': 'dog',
'id': 1
},
}
(_, example,
num_annotations_skipped, _, _) = create_coco_tf_record.create_tf_example(
image, annotations_list, image_dir, category_index, include_masks=True)
self.assertEqual(num_annotations_skipped, 0)
self._assertProtoEqual(
example.features.feature['image/height'].int64_list.value, [8])
self._assertProtoEqual(
example.features.feature['image/width'].int64_list.value, [8])
self._assertProtoEqual(
example.features.feature['image/filename'].bytes_list.value,
[six.b(image_file_name)])
self._assertProtoEqual(
example.features.feature['image/source_id'].bytes_list.value,
[six.b(str(image['id']))])
self._assertProtoEqual(
example.features.feature['image/format'].bytes_list.value,
[six.b('jpeg')])
self._assertProtoEqual(
example.features.feature['image/object/bbox/xmin'].float_list.value,
[0])
self._assertProtoEqual(
example.features.feature['image/object/bbox/ymin'].float_list.value,
[0])
self._assertProtoEqual(
example.features.feature['image/object/bbox/xmax'].float_list.value,
[1])
self._assertProtoEqual(
example.features.feature['image/object/bbox/ymax'].float_list.value,
[1])
self._assertProtoEqual(
example.features.feature['image/object/class/text'].bytes_list.value,
[six.b('dog')])
encoded_mask_pngs = [
io.BytesIO(encoded_masks) for encoded_masks in example.features.feature[
'image/object/mask'].bytes_list.value
]
pil_masks = [
np.array(PIL.Image.open(encoded_mask_png))
for encoded_mask_png in encoded_mask_pngs
]
self.assertEqual(len(pil_masks), 1)
self.assertAllEqual(pil_masks[0],
[[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 1, 1, 1], [0, 0, 0, 0, 1, 1, 1, 1]])
def test_create_tf_example_with_keypoints(self):
image_dir = self.get_temp_dir()
image_file_name = 'tmp_image.jpg'
image_data = np.random.randint(low=0, high=256, size=(256, 256, 3)).astype(
np.uint8)
save_path = os.path.join(image_dir, image_file_name)
image = PIL.Image.fromarray(image_data, 'RGB')
image.save(save_path)
image = {
'file_name': image_file_name,
'height': 256,
'width': 256,
'id': 11,
}
min_x, min_y = 64, 64
max_x, max_y = 128, 128
keypoints = []
num_visible_keypoints = 0
xv = []
yv = []
vv = []
for _ in range(17):
xc = min_x + int(np.random.rand()*(max_x - min_x))
yc = min_y + int(np.random.rand()*(max_y - min_y))
vis = np.random.randint(0, 3)
xv.append(xc)
yv.append(yc)
vv.append(vis)
keypoints.extend([xc, yc, vis])
num_visible_keypoints += (vis > 0)
annotations_list = [{
'area': 0.5,
'iscrowd': False,
'image_id': 11,
'bbox': [64, 64, 128, 128],
'category_id': 1,
'id': 1000
}]
keypoint_annotations_dict = {
1000: {
'keypoints': keypoints,
'num_keypoints': num_visible_keypoints
}
}
category_index = {
1: {
'name': 'person',
'id': 1
}
}
_, example, _, num_keypoint_annotation_skipped, _ = (
create_coco_tf_record.create_tf_example(
image,
annotations_list,
image_dir,
category_index,
include_masks=False,
keypoint_annotations_dict=keypoint_annotations_dict))
self.assertEqual(num_keypoint_annotation_skipped, 0)
self._assertProtoEqual(
example.features.feature['image/height'].int64_list.value, [256])
self._assertProtoEqual(
example.features.feature['image/width'].int64_list.value, [256])
self._assertProtoEqual(
example.features.feature['image/filename'].bytes_list.value,
[six.b(image_file_name)])
self._assertProtoEqual(
example.features.feature['image/source_id'].bytes_list.value,
[six.b(str(image['id']))])
self._assertProtoEqual(
example.features.feature['image/format'].bytes_list.value,
[six.b('jpeg')])
self._assertProtoEqual(
example.features.feature['image/object/bbox/xmin'].float_list.value,
[0.25])
self._assertProtoEqual(
example.features.feature['image/object/bbox/ymin'].float_list.value,
[0.25])
self._assertProtoEqual(
example.features.feature['image/object/bbox/xmax'].float_list.value,
[0.75])
self._assertProtoEqual(
example.features.feature['image/object/bbox/ymax'].float_list.value,
[0.75])
self._assertProtoEqual(
example.features.feature['image/object/class/text'].bytes_list.value,
[six.b('person')])
self._assertProtoClose(
example.features.feature['image/object/keypoint/x'].float_list.value,
np.array(xv, dtype=np.float32) / 256)
self._assertProtoClose(
example.features.feature['image/object/keypoint/y'].float_list.value,
np.array(yv, dtype=np.float32) / 256)
self._assertProtoEqual(
example.features.feature['image/object/keypoint/text'].bytes_list.value,
create_coco_tf_record._COCO_KEYPOINT_NAMES)
self._assertProtoEqual(
example.features.feature[
'image/object/keypoint/visibility'].int64_list.value, vv)
def test_create_tf_example_with_dense_pose(self):
image_dir = self.get_temp_dir()
image_file_name = 'tmp_image.jpg'
image_data = np.random.randint(low=0, high=256, size=(256, 256, 3)).astype(
np.uint8)
save_path = os.path.join(image_dir, image_file_name)
image = PIL.Image.fromarray(image_data, 'RGB')
image.save(save_path)
image = {
'file_name': image_file_name,
'height': 256,
'width': 256,
'id': 11,
}
min_x, min_y = 64, 64
max_x, max_y = 128, 128
keypoints = []
num_visible_keypoints = 0
xv = []
yv = []
vv = []
for _ in range(17):
xc = min_x + int(np.random.rand()*(max_x - min_x))
yc = min_y + int(np.random.rand()*(max_y - min_y))
vis = np.random.randint(0, 3)
xv.append(xc)
yv.append(yc)
vv.append(vis)
keypoints.extend([xc, yc, vis])
num_visible_keypoints += (vis > 0)
annotations_list = [{
'area': 0.5,
'iscrowd': False,
'image_id': 11,
'bbox': [64, 64, 128, 128],
'category_id': 1,
'id': 1000
}]
num_points = 45
dp_i = np.random.randint(1, 25, (num_points,)).astype(np.float32)
dp_u = np.random.randn(num_points)
dp_v = np.random.randn(num_points)
dp_x = np.random.rand(num_points)*256.
dp_y = np.random.rand(num_points)*256.
densepose_annotations_dict = {
1000: {
'dp_I': dp_i,
'dp_U': dp_u,
'dp_V': dp_v,
'dp_x': dp_x,
'dp_y': dp_y,
'bbox': [64, 64, 128, 128],
}
}
category_index = {
1: {
'name': 'person',
'id': 1
}
}
_, example, _, _, num_densepose_annotation_skipped = (
create_coco_tf_record.create_tf_example(
image,
annotations_list,
image_dir,
category_index,
include_masks=False,
densepose_annotations_dict=densepose_annotations_dict))
self.assertEqual(num_densepose_annotation_skipped, 0)
self._assertProtoEqual(
example.features.feature['image/height'].int64_list.value, [256])
self._assertProtoEqual(
example.features.feature['image/width'].int64_list.value, [256])
self._assertProtoEqual(
example.features.feature['image/filename'].bytes_list.value,
[six.b(image_file_name)])
self._assertProtoEqual(
example.features.feature['image/source_id'].bytes_list.value,
[six.b(str(image['id']))])
self._assertProtoEqual(
example.features.feature['image/format'].bytes_list.value,
[six.b('jpeg')])
self._assertProtoEqual(
example.features.feature['image/object/bbox/xmin'].float_list.value,
[0.25])
self._assertProtoEqual(
example.features.feature['image/object/bbox/ymin'].float_list.value,
[0.25])
self._assertProtoEqual(
example.features.feature['image/object/bbox/xmax'].float_list.value,
[0.75])
self._assertProtoEqual(
example.features.feature['image/object/bbox/ymax'].float_list.value,
[0.75])
self._assertProtoEqual(
example.features.feature['image/object/class/text'].bytes_list.value,
[six.b('person')])
self._assertProtoEqual(
example.features.feature['image/object/densepose/num'].int64_list.value,
[num_points])
self.assertAllEqual(
example.features.feature[
'image/object/densepose/part_index'].int64_list.value,
dp_i.astype(np.int64) - create_coco_tf_record._DP_PART_ID_OFFSET)
self.assertAllClose(
example.features.feature['image/object/densepose/u'].float_list.value,
dp_u)
self.assertAllClose(
example.features.feature['image/object/densepose/v'].float_list.value,
dp_v)
expected_dp_x = (64 + dp_x * 128. / 256.) / 256.
expected_dp_y = (64 + dp_y * 128. / 256.) / 256.
self.assertAllClose(
example.features.feature['image/object/densepose/x'].float_list.value,
expected_dp_x)
self.assertAllClose(
example.features.feature['image/object/densepose/y'].float_list.value,
expected_dp_y)
def test_create_sharded_tf_record(self):
tmp_dir = self.get_temp_dir()
image_paths = ['tmp1_image.jpg', 'tmp2_image.jpg']
for image_path in image_paths:
image_data = np.random.rand(256, 256, 3)
save_path = os.path.join(tmp_dir, image_path)
image = PIL.Image.fromarray(image_data, 'RGB')
image.save(save_path)
images = [{
'file_name': image_paths[0],
'height': 256,
'width': 256,
'id': 11,
}, {
'file_name': image_paths[1],
'height': 256,
'width': 256,
'id': 12,
}]
annotations = [{
'area': .5,
'iscrowd': False,
'image_id': 11,
'bbox': [64, 64, 128, 128],
'category_id': 2,
'id': 1000,
}]
category_index = [{
'name': 'dog',
'id': 1
}, {
'name': 'cat',
'id': 2
}, {
'name': 'human',
'id': 3
}]
groundtruth_data = {'images': images, 'annotations': annotations,
'categories': category_index}
annotation_file = os.path.join(tmp_dir, 'annotation.json')
with open(annotation_file, 'w') as annotation_fid:
json.dump(groundtruth_data, annotation_fid)
output_path = os.path.join(tmp_dir, 'out.record')
create_coco_tf_record._create_tf_record_from_coco_annotations(
annotation_file,
tmp_dir,
output_path,
False,
2)
self.assertTrue(os.path.exists(output_path + '-00000-of-00002'))
self.assertTrue(os.path.exists(output_path + '-00001-of-00002'))
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/dataset_tools/create_coco_tf_record_test.py | create_coco_tf_record_test.py |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for generate_embedding_data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import os
import tempfile
import unittest
import numpy as np
import six
import tensorflow as tf
from object_detection import exporter_lib_v2
from object_detection.builders import model_builder
from object_detection.core import model
from object_detection.protos import pipeline_pb2
from object_detection.utils import tf_version
if tf_version.is_tf2():
from object_detection.dataset_tools.context_rcnn import generate_embedding_data # pylint:disable=g-import-not-at-top
if six.PY2:
import mock # pylint: disable=g-import-not-at-top
else:
mock = unittest.mock
try:
import apache_beam as beam # pylint:disable=g-import-not-at-top
except ModuleNotFoundError:
pass
class FakeModel(model.DetectionModel):
def __init__(self, conv_weight_scalar=1.0):
super(FakeModel, self).__init__(num_classes=5)
self._conv = tf.keras.layers.Conv2D(
filters=1, kernel_size=1, strides=(1, 1), padding='valid',
kernel_initializer=tf.keras.initializers.Constant(
value=conv_weight_scalar))
def preprocess(self, inputs):
return tf.identity(inputs), exporter_lib_v2.get_true_shapes(inputs)
def predict(self, preprocessed_inputs, true_image_shapes):
return {'image': self._conv(preprocessed_inputs)}
def postprocess(self, prediction_dict, true_image_shapes):
with tf.control_dependencies(prediction_dict.values()):
num_features = 100
feature_dims = 10
classifier_feature = np.ones(
(2, feature_dims, feature_dims, num_features),
dtype=np.float32).tolist()
postprocessed_tensors = {
'detection_boxes': tf.constant([[[0.0, 0.1, 0.5, 0.6],
[0.5, 0.5, 0.8, 0.8]]], tf.float32),
'detection_scores': tf.constant([[0.95, 0.6]], tf.float32),
'detection_multiclass_scores': tf.constant([[[0.1, 0.7, 0.2],
[0.3, 0.1, 0.6]]],
tf.float32),
'detection_classes': tf.constant([[0, 1]], tf.float32),
'num_detections': tf.constant([2], tf.float32),
'detection_features':
tf.constant([classifier_feature],
tf.float32)
}
return postprocessed_tensors
def restore_map(self, checkpoint_path, fine_tune_checkpoint_type):
pass
def restore_from_objects(self, fine_tune_checkpoint_type):
pass
def loss(self, prediction_dict, true_image_shapes):
pass
def regularization_losses(self):
pass
def updates(self):
pass
@contextlib.contextmanager
def InMemoryTFRecord(entries):
temp = tempfile.NamedTemporaryFile(delete=False)
filename = temp.name
try:
with tf.io.TFRecordWriter(filename) as writer:
for value in entries:
writer.write(value)
yield filename
finally:
os.unlink(temp.name)
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class GenerateEmbeddingData(tf.test.TestCase):
def _save_checkpoint_from_mock_model(self, checkpoint_path):
"""A function to save checkpoint from a fake Detection Model.
Args:
checkpoint_path: Path to save checkpoint from Fake model.
"""
mock_model = FakeModel()
fake_image = tf.zeros(shape=[1, 10, 10, 3], dtype=tf.float32)
preprocessed_inputs, true_image_shapes = mock_model.preprocess(fake_image)
predictions = mock_model.predict(preprocessed_inputs, true_image_shapes)
mock_model.postprocess(predictions, true_image_shapes)
ckpt = tf.train.Checkpoint(model=mock_model)
exported_checkpoint_manager = tf.train.CheckpointManager(
ckpt, checkpoint_path, max_to_keep=1)
exported_checkpoint_manager.save(checkpoint_number=0)
def _export_saved_model(self):
tmp_dir = self.get_temp_dir()
self._save_checkpoint_from_mock_model(tmp_dir)
output_directory = os.path.join(tmp_dir, 'output')
saved_model_path = os.path.join(output_directory, 'saved_model')
tf.io.gfile.makedirs(output_directory)
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel()
exporter_lib_v2.INPUT_BUILDER_UTIL_MAP['model_build'] = mock_builder
output_directory = os.path.join(tmp_dir, 'output')
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
exporter_lib_v2.export_inference_graph(
input_type='tf_example',
pipeline_config=pipeline_config,
trained_checkpoint_dir=tmp_dir,
output_directory=output_directory)
saved_model_path = os.path.join(output_directory, 'saved_model')
return saved_model_path
def _create_tf_example(self):
encoded_image = tf.io.encode_jpeg(
tf.constant(np.ones((4, 4, 3)).astype(np.uint8))).numpy()
def BytesFeature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def Int64Feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def FloatFeature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded': BytesFeature(encoded_image),
'image/source_id': BytesFeature(b'image_id'),
'image/height': Int64Feature(400),
'image/width': Int64Feature(600),
'image/class/label': Int64Feature(5),
'image/class/text': BytesFeature(b'hyena'),
'image/object/bbox/xmin': FloatFeature(0.1),
'image/object/bbox/xmax': FloatFeature(0.6),
'image/object/bbox/ymin': FloatFeature(0.0),
'image/object/bbox/ymax': FloatFeature(0.5),
'image/object/class/score': FloatFeature(0.95),
'image/object/class/label': Int64Feature(5),
'image/object/class/text': BytesFeature(b'hyena'),
'image/date_captured': BytesFeature(b'2019-10-20 12:12:12')
}))
return example.SerializeToString()
def assert_expected_example(self, example, topk=False, botk=False):
# Check embeddings
if topk or botk:
self.assertEqual(len(
example.features.feature['image/embedding'].float_list.value),
218)
self.assertAllEqual(
example.features.feature['image/embedding_count'].int64_list.value,
[2])
else:
self.assertEqual(len(
example.features.feature['image/embedding'].float_list.value),
109)
self.assertAllEqual(
example.features.feature['image/embedding_count'].int64_list.value,
[1])
self.assertAllEqual(
example.features.feature['image/embedding_length'].int64_list.value,
[109])
# Check annotations
self.assertAllClose(
example.features.feature['image/object/bbox/ymin'].float_list.value,
[0.0])
self.assertAllClose(
example.features.feature['image/object/bbox/xmin'].float_list.value,
[0.1])
self.assertAllClose(
example.features.feature['image/object/bbox/ymax'].float_list.value,
[0.5])
self.assertAllClose(
example.features.feature['image/object/bbox/xmax'].float_list.value,
[0.6])
self.assertAllClose(
example.features.feature['image/object/class/score']
.float_list.value, [0.95])
self.assertAllClose(
example.features.feature['image/object/class/label']
.int64_list.value, [5])
self.assertAllEqual(
example.features.feature['image/object/class/text']
.bytes_list.value, [b'hyena'])
self.assertAllClose(
example.features.feature['image/class/label']
.int64_list.value, [5])
self.assertAllEqual(
example.features.feature['image/class/text']
.bytes_list.value, [b'hyena'])
# Check other essential attributes.
self.assertAllEqual(
example.features.feature['image/height'].int64_list.value, [400])
self.assertAllEqual(
example.features.feature['image/width'].int64_list.value, [600])
self.assertAllEqual(
example.features.feature['image/source_id'].bytes_list.value,
[b'image_id'])
self.assertTrue(
example.features.feature['image/encoded'].bytes_list.value)
def test_generate_embedding_data_fn(self):
saved_model_path = self._export_saved_model()
top_k_embedding_count = 1
bottom_k_embedding_count = 0
inference_fn = generate_embedding_data.GenerateEmbeddingDataFn(
saved_model_path, top_k_embedding_count, bottom_k_embedding_count)
inference_fn.setup()
generated_example = self._create_tf_example()
self.assertAllEqual(tf.train.Example.FromString(
generated_example).features.feature['image/object/class/label']
.int64_list.value, [5])
self.assertAllEqual(tf.train.Example.FromString(
generated_example).features.feature['image/object/class/text']
.bytes_list.value, [b'hyena'])
output = inference_fn.process(('dummy_key', generated_example))
output_example = output[0][1]
self.assert_expected_example(output_example)
def test_generate_embedding_data_with_top_k_boxes(self):
saved_model_path = self._export_saved_model()
top_k_embedding_count = 2
bottom_k_embedding_count = 0
inference_fn = generate_embedding_data.GenerateEmbeddingDataFn(
saved_model_path, top_k_embedding_count, bottom_k_embedding_count)
inference_fn.setup()
generated_example = self._create_tf_example()
self.assertAllEqual(
tf.train.Example.FromString(generated_example).features
.feature['image/object/class/label'].int64_list.value, [5])
self.assertAllEqual(
tf.train.Example.FromString(generated_example).features
.feature['image/object/class/text'].bytes_list.value, [b'hyena'])
output = inference_fn.process(('dummy_key', generated_example))
output_example = output[0][1]
self.assert_expected_example(output_example, topk=True)
def test_generate_embedding_data_with_bottom_k_boxes(self):
saved_model_path = self._export_saved_model()
top_k_embedding_count = 0
bottom_k_embedding_count = 2
inference_fn = generate_embedding_data.GenerateEmbeddingDataFn(
saved_model_path, top_k_embedding_count, bottom_k_embedding_count)
inference_fn.setup()
generated_example = self._create_tf_example()
self.assertAllEqual(
tf.train.Example.FromString(generated_example).features
.feature['image/object/class/label'].int64_list.value, [5])
self.assertAllEqual(
tf.train.Example.FromString(generated_example).features
.feature['image/object/class/text'].bytes_list.value, [b'hyena'])
output = inference_fn.process(('dummy_key', generated_example))
output_example = output[0][1]
self.assert_expected_example(output_example, botk=True)
def test_beam_pipeline(self):
with InMemoryTFRecord([self._create_tf_example()]) as input_tfrecord:
temp_dir = tempfile.mkdtemp(dir=os.environ.get('TEST_TMPDIR'))
output_tfrecord = os.path.join(temp_dir, 'output_tfrecord')
saved_model_path = self._export_saved_model()
top_k_embedding_count = 1
bottom_k_embedding_count = 0
num_shards = 1
embedding_type = 'final_box_features'
pipeline_options = beam.options.pipeline_options.PipelineOptions(
runner='DirectRunner')
p = beam.Pipeline(options=pipeline_options)
generate_embedding_data.construct_pipeline(
p, input_tfrecord, output_tfrecord, saved_model_path,
top_k_embedding_count, bottom_k_embedding_count, num_shards,
embedding_type)
p.run()
filenames = tf.io.gfile.glob(
output_tfrecord + '-?????-of-?????')
actual_output = []
record_iterator = tf.data.TFRecordDataset(
tf.convert_to_tensor(filenames)).as_numpy_iterator()
for record in record_iterator:
actual_output.append(record)
self.assertEqual(len(actual_output), 1)
self.assert_expected_example(tf.train.Example.FromString(
actual_output[0]))
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/dataset_tools/context_rcnn/generate_embedding_data_tf2_test.py | generate_embedding_data_tf2_test.py |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""A Beam job to generate detection data for camera trap images.
This tool runs inference with an exported Object Detection model in
`saved_model` format and produces raw detection boxes for images in
tf.Examples, under the assumption that the bounding box class label matches
the image-level class label in the tf.Example.
Steps to generate a detection dataset:
1. Use object_detection/export_inference_graph.py to get a `saved_model` for
inference. The input node must accept a tf.Example proto.
2. Run this tool with `saved_model` from step 1 and a TFRecord of tf.Example
protos containing images for inference.
Example Usage:
--------------
python tensorflow_models/object_detection/export_inference_graph.py \
--alsologtostderr \
--input_type tf_example \
--pipeline_config_path path/to/detection_model.config \
--trained_checkpoint_prefix path/to/model.ckpt \
--output_directory path/to/exported_model_directory
python generate_detection_data.py \
--alsologtostderr \
--input_tfrecord path/to/input_tfrecord@X \
--output_tfrecord path/to/output_tfrecord@X \
--model_dir path/to/exported_model_directory/saved_model
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import threading
import tensorflow as tf
try:
import apache_beam as beam # pylint:disable=g-import-not-at-top
except ModuleNotFoundError:
pass
class GenerateDetectionDataFn(beam.DoFn):
"""Generates detection data for camera trap images.
This Beam DoFn performs inference with an object detection `saved_model` and
produces detection boxes for camera trap data, matched to the
object class.
"""
session_lock = threading.Lock()
def __init__(self, model_dir, confidence_threshold):
"""Initialization function.
Args:
model_dir: A directory containing saved model.
confidence_threshold: the confidence threshold for boxes to keep
"""
self._model_dir = model_dir
self._confidence_threshold = confidence_threshold
self._session = None
self._num_examples_processed = beam.metrics.Metrics.counter(
'detection_data_generation', 'num_tf_examples_processed')
def setup(self):
self._load_inference_model()
def _load_inference_model(self):
    # Loading the saved model is expensive, so the load is guarded by
    # session_lock to avoid several threads in the worker loading it at once;
    # the loaded model can then be called safely from multiple threads.
with self.session_lock:
self._detect_fn = tf.saved_model.load(self._model_dir)
def process(self, tfrecord_entry):
return self._run_inference_and_generate_detections(tfrecord_entry)
def _run_inference_and_generate_detections(self, tfrecord_entry):
input_example = tf.train.Example.FromString(tfrecord_entry)
if input_example.features.feature[
'image/object/bbox/ymin'].float_list.value:
# There are already ground truth boxes for this image, just keep them.
return [input_example]
detections = self._detect_fn.signatures['serving_default'](
(tf.expand_dims(tf.convert_to_tensor(tfrecord_entry), 0)))
detection_boxes = detections['detection_boxes']
num_detections = detections['num_detections']
detection_scores = detections['detection_scores']
example = tf.train.Example()
num_detections = int(num_detections[0])
image_class_labels = input_example.features.feature[
'image/object/class/label'].int64_list.value
image_class_texts = input_example.features.feature[
'image/object/class/text'].bytes_list.value
# Ignore any images with multiple classes,
# we can't match the class to the box.
if len(image_class_labels) > 1:
return []
# Don't add boxes for images already labeled empty (for now)
if len(image_class_labels) == 1:
# Add boxes over confidence threshold.
for idx, score in enumerate(detection_scores[0]):
if score >= self._confidence_threshold and idx < num_detections:
example.features.feature[
'image/object/bbox/ymin'].float_list.value.extend([
detection_boxes[0, idx, 0]])
example.features.feature[
'image/object/bbox/xmin'].float_list.value.extend([
detection_boxes[0, idx, 1]])
example.features.feature[
'image/object/bbox/ymax'].float_list.value.extend([
detection_boxes[0, idx, 2]])
example.features.feature[
'image/object/bbox/xmax'].float_list.value.extend([
detection_boxes[0, idx, 3]])
# Add box scores and class texts and labels.
example.features.feature[
'image/object/class/score'].float_list.value.extend(
[score])
example.features.feature[
'image/object/class/label'].int64_list.value.extend(
[image_class_labels[0]])
example.features.feature[
'image/object/class/text'].bytes_list.value.extend(
[image_class_texts[0]])
# Add other essential example attributes
example.features.feature['image/encoded'].bytes_list.value.extend(
input_example.features.feature['image/encoded'].bytes_list.value)
example.features.feature['image/height'].int64_list.value.extend(
input_example.features.feature['image/height'].int64_list.value)
example.features.feature['image/width'].int64_list.value.extend(
input_example.features.feature['image/width'].int64_list.value)
example.features.feature['image/source_id'].bytes_list.value.extend(
input_example.features.feature['image/source_id'].bytes_list.value)
example.features.feature['image/location'].bytes_list.value.extend(
input_example.features.feature['image/location'].bytes_list.value)
example.features.feature['image/date_captured'].bytes_list.value.extend(
input_example.features.feature['image/date_captured'].bytes_list.value)
example.features.feature['image/class/text'].bytes_list.value.extend(
input_example.features.feature['image/class/text'].bytes_list.value)
example.features.feature['image/class/label'].int64_list.value.extend(
input_example.features.feature['image/class/label'].int64_list.value)
example.features.feature['image/seq_id'].bytes_list.value.extend(
input_example.features.feature['image/seq_id'].bytes_list.value)
example.features.feature['image/seq_num_frames'].int64_list.value.extend(
input_example.features.feature['image/seq_num_frames'].int64_list.value)
example.features.feature['image/seq_frame_num'].int64_list.value.extend(
input_example.features.feature['image/seq_frame_num'].int64_list.value)
self._num_examples_processed.inc(1)
return [example]
def construct_pipeline(pipeline, input_tfrecord, output_tfrecord, model_dir,
confidence_threshold, num_shards):
"""Returns a Beam pipeline to run object detection inference.
Args:
pipeline: Initialized beam pipeline.
input_tfrecord: A TFRecord of tf.train.Example protos containing images.
output_tfrecord: A TFRecord of tf.train.Example protos that contain images
in the input TFRecord and the detections from the model.
model_dir: Path to `saved_model` to use for inference.
confidence_threshold: Threshold to use when keeping detection results.
num_shards: The number of output shards.
"""
input_collection = (
pipeline | 'ReadInputTFRecord' >> beam.io.tfrecordio.ReadFromTFRecord(
input_tfrecord,
coder=beam.coders.BytesCoder()))
output_collection = input_collection | 'RunInference' >> beam.ParDo(
GenerateDetectionDataFn(model_dir, confidence_threshold))
output_collection = output_collection | 'Reshuffle' >> beam.Reshuffle()
_ = output_collection | 'WritetoDisk' >> beam.io.tfrecordio.WriteToTFRecord(
output_tfrecord,
num_shards=num_shards,
coder=beam.coders.ProtoCoder(tf.train.Example))
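# A minimal sketch of driving construct_pipeline directly (mirroring main()
# below and the accompanying unit test); the paths and values here are
# illustrative placeholders, not values from this repository:
#
#   options = beam.options.pipeline_options.PipelineOptions(
#       runner='DirectRunner')
#   p = beam.Pipeline(options=options)
#   construct_pipeline(
#       p, '/tmp/camera_traps/input.tfrecord', '/tmp/camera_traps/output',
#       '/tmp/exported_model/saved_model', confidence_threshold=0.9,
#       num_shards=1)
#   p.run()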
def parse_args(argv):
"""Command-line argument parser.
Args:
argv: command line arguments
Returns:
beam_args: Arguments for the beam pipeline.
pipeline_args: Arguments for the pipeline options, such as runner type.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'--detection_input_tfrecord',
dest='detection_input_tfrecord',
required=True,
help='TFRecord containing images in tf.Example format for object '
'detection.')
parser.add_argument(
'--detection_output_tfrecord',
dest='detection_output_tfrecord',
required=True,
help='TFRecord containing detections in tf.Example format.')
parser.add_argument(
'--detection_model_dir',
dest='detection_model_dir',
required=True,
help='Path to directory containing an object detection SavedModel.')
parser.add_argument(
'--confidence_threshold',
dest='confidence_threshold',
default=0.9,
help='Min confidence to keep bounding boxes.')
parser.add_argument(
'--num_shards',
dest='num_shards',
default=0,
help='Number of output shards.')
beam_args, pipeline_args = parser.parse_known_args(argv)
return beam_args, pipeline_args
def main(argv=None, save_main_session=True):
"""Runs the Beam pipeline that performs inference.
Args:
argv: Command line arguments.
save_main_session: Whether to save the main session.
"""
args, pipeline_args = parse_args(argv)
pipeline_options = beam.options.pipeline_options.PipelineOptions(
pipeline_args)
pipeline_options.view_as(
beam.options.pipeline_options.SetupOptions).save_main_session = (
save_main_session)
dirname = os.path.dirname(args.detection_output_tfrecord)
tf.io.gfile.makedirs(dirname)
p = beam.Pipeline(options=pipeline_options)
construct_pipeline(
p,
args.detection_input_tfrecord,
args.detection_output_tfrecord,
args.detection_model_dir,
args.confidence_threshold,
args.num_shards)
p.run()
if __name__ == '__main__':
main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/dataset_tools/context_rcnn/generate_detection_data.py | generate_detection_data.py |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for generate_detection_data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import os
import tempfile
import unittest
import numpy as np
import six
import tensorflow as tf
from object_detection import exporter_lib_v2
from object_detection.builders import model_builder
from object_detection.core import model
from object_detection.protos import pipeline_pb2
from object_detection.utils import tf_version
if tf_version.is_tf2():
from object_detection.dataset_tools.context_rcnn import generate_detection_data # pylint:disable=g-import-not-at-top
if six.PY2:
import mock # pylint: disable=g-import-not-at-top
else:
mock = unittest.mock
try:
import apache_beam as beam # pylint:disable=g-import-not-at-top
except ModuleNotFoundError:
pass
class FakeModel(model.DetectionModel):
def __init__(self, conv_weight_scalar=1.0):
super(FakeModel, self).__init__(num_classes=5)
self._conv = tf.keras.layers.Conv2D(
filters=1, kernel_size=1, strides=(1, 1), padding='valid',
kernel_initializer=tf.keras.initializers.Constant(
value=conv_weight_scalar))
def preprocess(self, inputs):
return tf.identity(inputs), exporter_lib_v2.get_true_shapes(inputs)
def predict(self, preprocessed_inputs, true_image_shapes):
return {'image': self._conv(preprocessed_inputs)}
def postprocess(self, prediction_dict, true_image_shapes):
with tf.control_dependencies(list(prediction_dict.values())):
postprocessed_tensors = {
'detection_boxes': tf.constant([[[0.0, 0.1, 0.5, 0.6],
[0.5, 0.5, 0.8, 0.8]]], tf.float32),
'detection_scores': tf.constant([[0.95, 0.6]], tf.float32),
'detection_multiclass_scores': tf.constant([[[0.1, 0.7, 0.2],
[0.3, 0.1, 0.6]]],
tf.float32),
'detection_classes': tf.constant([[0, 1]], tf.float32),
'num_detections': tf.constant([2], tf.float32)
}
return postprocessed_tensors
def restore_map(self, checkpoint_path, fine_tune_checkpoint_type):
pass
def restore_from_objects(self, fine_tune_checkpoint_type):
pass
def loss(self, prediction_dict, true_image_shapes):
pass
def regularization_losses(self):
pass
def updates(self):
pass
@contextlib.contextmanager
def InMemoryTFRecord(entries):
temp = tempfile.NamedTemporaryFile(delete=False)
filename = temp.name
try:
with tf.io.TFRecordWriter(filename) as writer:
for value in entries:
writer.write(value)
yield filename
finally:
os.unlink(filename)
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class GenerateDetectionDataTest(tf.test.TestCase):
def _save_checkpoint_from_mock_model(self, checkpoint_path):
"""A function to save checkpoint from a fake Detection Model.
Args:
checkpoint_path: Path to save checkpoint from Fake model.
"""
mock_model = FakeModel()
fake_image = tf.zeros(shape=[1, 10, 10, 3], dtype=tf.float32)
preprocessed_inputs, true_image_shapes = mock_model.preprocess(fake_image)
predictions = mock_model.predict(preprocessed_inputs, true_image_shapes)
mock_model.postprocess(predictions, true_image_shapes)
ckpt = tf.train.Checkpoint(model=mock_model)
exported_checkpoint_manager = tf.train.CheckpointManager(
ckpt, checkpoint_path, max_to_keep=1)
exported_checkpoint_manager.save(checkpoint_number=0)
def _export_saved_model(self):
tmp_dir = self.get_temp_dir()
self._save_checkpoint_from_mock_model(tmp_dir)
output_directory = os.path.join(tmp_dir, 'output')
saved_model_path = os.path.join(output_directory, 'saved_model')
tf.io.gfile.makedirs(output_directory)
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel()
exporter_lib_v2.INPUT_BUILDER_UTIL_MAP['model_build'] = mock_builder
output_directory = os.path.join(tmp_dir, 'output')
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
exporter_lib_v2.export_inference_graph(
input_type='tf_example',
pipeline_config=pipeline_config,
trained_checkpoint_dir=tmp_dir,
output_directory=output_directory)
saved_model_path = os.path.join(output_directory, 'saved_model')
return saved_model_path
def _create_tf_example(self):
with self.test_session():
encoded_image = tf.io.encode_jpeg(
tf.constant(np.ones((4, 6, 3)).astype(np.uint8))).numpy()
def BytesFeature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def Int64Feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
example = tf.train.Example(features=tf.train.Features(feature={
'image/encoded': BytesFeature(encoded_image),
'image/source_id': BytesFeature(b'image_id'),
'image/height': Int64Feature(4),
'image/width': Int64Feature(6),
'image/object/class/label': Int64Feature(5),
'image/object/class/text': BytesFeature(b'hyena'),
'image/class/label': Int64Feature(5),
'image/class/text': BytesFeature(b'hyena'),
}))
return example.SerializeToString()
def assert_expected_example(self, example):
self.assertAllClose(
example.features.feature['image/object/bbox/ymin'].float_list.value,
[0.0])
self.assertAllClose(
example.features.feature['image/object/bbox/xmin'].float_list.value,
[0.1])
self.assertAllClose(
example.features.feature['image/object/bbox/ymax'].float_list.value,
[0.5])
self.assertAllClose(
example.features.feature['image/object/bbox/xmax'].float_list.value,
[0.6])
self.assertAllClose(
example.features.feature['image/object/class/score']
.float_list.value, [0.95])
self.assertAllClose(
example.features.feature['image/object/class/label']
.int64_list.value, [5])
self.assertAllEqual(
example.features.feature['image/object/class/text']
.bytes_list.value, [b'hyena'])
self.assertAllClose(
example.features.feature['image/class/label']
.int64_list.value, [5])
self.assertAllEqual(
example.features.feature['image/class/text']
.bytes_list.value, [b'hyena'])
# Check other essential attributes.
self.assertAllEqual(
example.features.feature['image/height'].int64_list.value, [4])
self.assertAllEqual(
example.features.feature['image/width'].int64_list.value, [6])
self.assertAllEqual(
example.features.feature['image/source_id'].bytes_list.value,
[b'image_id'])
self.assertTrue(
example.features.feature['image/encoded'].bytes_list.value)
def test_generate_detection_data_fn(self):
saved_model_path = self._export_saved_model()
confidence_threshold = 0.8
inference_fn = generate_detection_data.GenerateDetectionDataFn(
saved_model_path, confidence_threshold)
inference_fn.setup()
generated_example = self._create_tf_example()
self.assertAllEqual(tf.train.Example.FromString(
generated_example).features.feature['image/object/class/label']
.int64_list.value, [5])
self.assertAllEqual(tf.train.Example.FromString(
generated_example).features.feature['image/object/class/text']
.bytes_list.value, [b'hyena'])
output = inference_fn.process(generated_example)
output_example = output[0]
self.assertAllEqual(
output_example.features.feature['image/object/class/label']
.int64_list.value, [5])
self.assertAllEqual(output_example.features.feature['image/width']
.int64_list.value, [6])
self.assert_expected_example(output_example)
def test_beam_pipeline(self):
with InMemoryTFRecord([self._create_tf_example()]) as input_tfrecord:
temp_dir = tempfile.mkdtemp(dir=os.environ.get('TEST_TMPDIR'))
output_tfrecord = os.path.join(temp_dir, 'output_tfrecord')
saved_model_path = self._export_saved_model()
confidence_threshold = 0.8
num_shards = 1
pipeline_options = beam.options.pipeline_options.PipelineOptions(
runner='DirectRunner')
p = beam.Pipeline(options=pipeline_options)
generate_detection_data.construct_pipeline(
p, input_tfrecord, output_tfrecord, saved_model_path,
confidence_threshold, num_shards)
p.run()
filenames = tf.io.gfile.glob(output_tfrecord + '-?????-of-?????')
actual_output = []
record_iterator = tf.data.TFRecordDataset(
tf.convert_to_tensor(filenames)).as_numpy_iterator()
for record in record_iterator:
actual_output.append(record)
self.assertEqual(len(actual_output), 1)
self.assert_expected_example(tf.train.Example.FromString(
actual_output[0]))
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/dataset_tools/context_rcnn/generate_detection_data_tf2_test.py | generate_detection_data_tf2_test.py |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""A Beam job to add contextual memory banks to tf.Examples.
This tool groups images containing bounding boxes and embedded context features
by a key, either `image/location` or `image/seq_id`, and time horizon,
then uses these groups to build up a contextual memory bank from the embedded
context features from each image in the group and adds that context to the
output tf.Examples for each image in the group.
Steps to generate a dataset with context from one with bounding boxes and
embedded context features:
1. Use object_detection/export_inference_graph.py to get a `saved_model` for
  inference (the input node must accept a tf.Example proto), then run
  generate_embedding_data.py with that `saved_model` to add embedded context
  features to your tf.Examples.
2. Run this tool on the resulting TFRecord of tf.Example protos containing
  images, bounding boxes, and embedded context features.
Example Usage:
--------------
python add_context_to_examples.py \
--input_tfrecord path/to/input_tfrecords* \
--output_tfrecord path/to/output_tfrecords \
--sequence_key image/location \
--time_horizon month
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import copy
import datetime
import io
import itertools
import json
import os
import numpy as np
import PIL.Image
import six
import tensorflow as tf
try:
import apache_beam as beam # pylint:disable=g-import-not-at-top
except ModuleNotFoundError:
pass
class ReKeyDataFn(beam.DoFn):
"""Re-keys tfrecords by sequence_key.
This Beam DoFn re-keys the tfrecords by a user-defined sequence_key
"""
def __init__(self, sequence_key, time_horizon,
reduce_image_size, max_image_dimension):
"""Initialization function.
Args:
sequence_key: A feature name to use as a key for grouping sequences.
Must point to a key of type bytes_list
time_horizon: What length of time to use to partition the data when
        building the memory banks. Options: `year`, `month`, `week`, `day`,
`hour`, `minute`, None
reduce_image_size: Whether to reduce the sizes of the stored images.
max_image_dimension: maximum dimension of reduced images
"""
self._sequence_key = sequence_key
if time_horizon is None or time_horizon in {'year', 'month', 'week', 'day',
'hour', 'minute'}:
self._time_horizon = time_horizon
else:
raise ValueError('Time horizon not supported.')
self._reduce_image_size = reduce_image_size
self._max_image_dimension = max_image_dimension
self._session = None
self._num_examples_processed = beam.metrics.Metrics.counter(
'data_rekey', 'num_tf_examples_processed')
self._num_images_resized = beam.metrics.Metrics.counter(
'data_rekey', 'num_images_resized')
self._num_images_read = beam.metrics.Metrics.counter(
'data_rekey', 'num_images_read')
self._num_images_found = beam.metrics.Metrics.counter(
        'data_rekey', 'num_images_found')
self._num_got_shape = beam.metrics.Metrics.counter(
'data_rekey', 'num_images_got_shape')
self._num_images_found_size = beam.metrics.Metrics.counter(
'data_rekey', 'num_images_found_size')
self._num_examples_cleared = beam.metrics.Metrics.counter(
'data_rekey', 'num_examples_cleared')
self._num_examples_updated = beam.metrics.Metrics.counter(
'data_rekey', 'num_examples_updated')
def process(self, tfrecord_entry):
return self._rekey_examples(tfrecord_entry)
def _largest_size_at_most(self, height, width, largest_side):
"""Computes new shape with the largest side equal to `largest_side`.
Args:
height: an int indicating the current height.
width: an int indicating the current width.
largest_side: A python integer indicating the size of
the largest side after resize.
Returns:
new_height: an int indicating the new height.
new_width: an int indicating the new width.
"""
x_scale = float(largest_side) / float(width)
y_scale = float(largest_side) / float(height)
scale = min(x_scale, y_scale)
new_width = int(width * scale)
new_height = int(height * scale)
return new_height, new_width
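  # Illustrative example (not from the original source): with height=400,
  # width=600 and largest_side=1024, x_scale = 1024/600 ~= 1.707 and
  # y_scale = 1024/400 = 2.56, so scale ~= 1.707 and the method returns
  # (new_height, new_width) = (682, 1024): the longer side is resized to
  # `largest_side` and the aspect ratio is preserved.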
def _resize_image(self, input_example):
"""Resizes the image within input_example and updates the height and width.
Args:
input_example: A tf.Example that we want to update to contain a resized
image.
Returns:
input_example: Updated tf.Example.
"""
original_image = copy.deepcopy(
input_example.features.feature['image/encoded'].bytes_list.value[0])
self._num_images_read.inc(1)
height = copy.deepcopy(
input_example.features.feature['image/height'].int64_list.value[0])
width = copy.deepcopy(
input_example.features.feature['image/width'].int64_list.value[0])
self._num_got_shape.inc(1)
new_height, new_width = self._largest_size_at_most(
height, width, self._max_image_dimension)
self._num_images_found_size.inc(1)
encoded_jpg_io = io.BytesIO(original_image)
image = PIL.Image.open(encoded_jpg_io)
resized_image = image.resize((new_width, new_height))
with io.BytesIO() as output:
resized_image.save(output, format='JPEG')
encoded_resized_image = output.getvalue()
self._num_images_resized.inc(1)
del input_example.features.feature['image/encoded'].bytes_list.value[:]
del input_example.features.feature['image/height'].int64_list.value[:]
del input_example.features.feature['image/width'].int64_list.value[:]
self._num_examples_cleared.inc(1)
input_example.features.feature['image/encoded'].bytes_list.value.extend(
[encoded_resized_image])
input_example.features.feature['image/height'].int64_list.value.extend(
[new_height])
input_example.features.feature['image/width'].int64_list.value.extend(
[new_width])
self._num_examples_updated.inc(1)
return input_example
def _rekey_examples(self, tfrecord_entry):
serialized_example = copy.deepcopy(tfrecord_entry)
input_example = tf.train.Example.FromString(serialized_example)
self._num_images_found.inc(1)
if self._reduce_image_size:
input_example = self._resize_image(input_example)
self._num_images_resized.inc(1)
new_key = input_example.features.feature[
self._sequence_key].bytes_list.value[0]
if self._time_horizon:
date_captured = datetime.datetime.strptime(
six.ensure_str(input_example.features.feature[
'image/date_captured'].bytes_list.value[0]), '%Y-%m-%d %H:%M:%S')
year = date_captured.year
month = date_captured.month
day = date_captured.day
week = np.floor(float(day) / float(7))
hour = date_captured.hour
minute = date_captured.minute
if self._time_horizon == 'year':
new_key = new_key + six.ensure_binary('/' + str(year))
elif self._time_horizon == 'month':
new_key = new_key + six.ensure_binary(
'/' + str(year) + '/' + str(month))
elif self._time_horizon == 'week':
new_key = new_key + six.ensure_binary(
'/' + str(year) + '/' + str(month) + '/' + str(week))
elif self._time_horizon == 'day':
new_key = new_key + six.ensure_binary(
'/' + str(year) + '/' + str(month) + '/' + str(day))
elif self._time_horizon == 'hour':
new_key = new_key + six.ensure_binary(
'/' + str(year) + '/' + str(month) + '/' + str(day) + '/' + (
str(hour)))
elif self._time_horizon == 'minute':
new_key = new_key + six.ensure_binary(
'/' + str(year) + '/' + str(month) + '/' + str(day) + '/' + (
str(hour) + '/' + str(minute)))
self._num_examples_processed.inc(1)
return [(new_key, input_example)]
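  # Illustrative example (not from the original source): with
  # sequence_key='image/location' and time_horizon='month', an example whose
  # 'image/location' is b'loc_1' and whose 'image/date_captured' is
  # '2019-10-20 12:12:12' is emitted under the key b'loc_1/2019/10', so all
  # images from that location and month are grouped together downstream.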
class SortGroupedDataFn(beam.DoFn):
"""Sorts data within a keyed group.
This Beam DoFn sorts the grouped list of image examples by frame_num
"""
def __init__(self, sequence_key, sorted_image_ids,
max_num_elements_in_context_features):
"""Initialization function.
Args:
sequence_key: A feature name to use as a key for grouping sequences.
Must point to a key of type bytes_list
sorted_image_ids: Whether the image ids are sortable to use as sorting
tie-breakers
max_num_elements_in_context_features: The maximum number of elements
allowed in the memory bank
"""
self._session = None
self._num_examples_processed = beam.metrics.Metrics.counter(
'sort_group', 'num_groups_sorted')
self._too_many_elements = beam.metrics.Metrics.counter(
'sort_group', 'too_many_elements')
self._split_elements = beam.metrics.Metrics.counter(
'sort_group', 'split_elements')
self._sequence_key = six.ensure_binary(sequence_key)
self._sorted_image_ids = sorted_image_ids
self._max_num_elements_in_context_features = (
max_num_elements_in_context_features)
def process(self, grouped_entry):
return self._sort_image_examples(grouped_entry)
def _sort_image_examples(self, grouped_entry):
key, example_collection = grouped_entry
example_list = list(example_collection)
def get_frame_num(example):
return example.features.feature['image/seq_frame_num'].int64_list.value[0]
def get_date_captured(example):
return datetime.datetime.strptime(
six.ensure_str(
example.features.feature[
'image/date_captured'].bytes_list.value[0]),
'%Y-%m-%d %H:%M:%S')
def get_image_id(example):
return example.features.feature['image/source_id'].bytes_list.value[0]
if self._sequence_key == six.ensure_binary('image/seq_id'):
sorting_fn = get_frame_num
elif self._sequence_key == six.ensure_binary('image/location'):
if self._sorted_image_ids:
sorting_fn = get_image_id
else:
sorting_fn = get_date_captured
sorted_example_list = sorted(example_list, key=sorting_fn)
num_embeddings = 0
for example in sorted_example_list:
num_embeddings += example.features.feature[
'image/embedding_count'].int64_list.value[0]
self._num_examples_processed.inc(1)
# To handle cases where there are more context embeddings within
# the time horizon than the specified maximum, we split the context group
# into subsets sequentially in time, with each subset having the maximum
# number of context embeddings except the final one, which holds the
# remainder.
if num_embeddings > self._max_num_elements_in_context_features:
leftovers = sorted_example_list
output_list = []
count = 0
self._too_many_elements.inc(1)
num_embeddings = 0
max_idx = 0
for idx, example in enumerate(leftovers):
num_embeddings += example.features.feature[
'image/embedding_count'].int64_list.value[0]
if num_embeddings <= self._max_num_elements_in_context_features:
max_idx = idx
while num_embeddings > self._max_num_elements_in_context_features:
self._split_elements.inc(1)
new_key = key + six.ensure_binary('_' + str(count))
new_list = leftovers[:max_idx]
output_list.append((new_key, new_list))
leftovers = leftovers[max_idx:]
count += 1
num_embeddings = 0
max_idx = 0
for idx, example in enumerate(leftovers):
num_embeddings += example.features.feature[
'image/embedding_count'].int64_list.value[0]
if num_embeddings <= self._max_num_elements_in_context_features:
max_idx = idx
new_key = key + six.ensure_binary('_' + str(count))
output_list.append((new_key, leftovers))
else:
output_list = [(key, sorted_example_list)]
return output_list
def get_sliding_window(example_list, max_clip_length, stride_length):
"""Yields a sliding window over data from example_list.
  The sliding window has width max_clip_length (n) and stride stride_length
  (m): s -> (s[0], s[1], ..., s[n-1]), (s[m], s[m+1], ..., s[m+n-1]), ...
Args:
example_list: A list of examples.
max_clip_length: The maximum length of each clip.
stride_length: The stride between each clip.
Yields:
A list of lists of examples, each with length <= max_clip_length
"""
# check if the list is too short to slide over
if len(example_list) < max_clip_length:
yield example_list
else:
starting_values = [i*stride_length for i in
range(len(example_list)) if
len(example_list) > i*stride_length]
for start in starting_values:
result = tuple(itertools.islice(example_list, start,
min(start + max_clip_length,
len(example_list))))
yield result
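# Illustrative example (not from the original source): for a list of five
# examples [e0, e1, e2, e3, e4] with max_clip_length=3 and stride_length=2,
# get_sliding_window yields (e0, e1, e2), (e2, e3, e4) and (e4,); a list
# shorter than max_clip_length is yielded unchanged as a single window.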
class GenerateContextFn(beam.DoFn):
"""Generates context data for camera trap images.
This Beam DoFn builds up contextual memory banks from groups of images and
stores them in the output tf.Example or tf.Sequence_example for each image.
"""
def __init__(self, sequence_key, add_context_features, image_ids_to_keep,
keep_context_features_image_id_list=False,
subsample_context_features_rate=0,
keep_only_positives=False,
context_features_score_threshold=0.7,
keep_only_positives_gt=False,
max_num_elements_in_context_features=5000,
pad_context_features=False,
output_type='tf_example', max_clip_length=None,
context_feature_length=2057):
"""Initialization function.
Args:
sequence_key: A feature name to use as a key for grouping sequences.
add_context_features: Whether to keep and store the contextual memory
bank.
image_ids_to_keep: A list of image ids to save, to use to build data
subsets for evaluation.
keep_context_features_image_id_list: Whether to save an ordered list of
the ids of the images in the contextual memory bank.
subsample_context_features_rate: What rate to subsample images for the
contextual memory bank.
keep_only_positives: Whether to only keep high scoring
(>context_features_score_threshold) features in the contextual memory
bank.
context_features_score_threshold: What threshold to use for keeping
features.
keep_only_positives_gt: Whether to only keep features from images that
contain objects based on the ground truth (for training).
max_num_elements_in_context_features: the maximum number of elements in
the memory bank
pad_context_features: Whether to pad the context features to a fixed size.
      output_type: What type of output, one of `tf_example` or
        `tf_sequence_example`.
      max_clip_length: The maximum length of a sequence example, before
        splitting into multiple sequence examples.
context_feature_length: The length of the context feature embeddings
stored in the input data.
"""
self._session = None
self._num_examples_processed = beam.metrics.Metrics.counter(
'sequence_data_generation', 'num_seq_examples_processed')
self._num_keys_processed = beam.metrics.Metrics.counter(
'sequence_data_generation', 'num_keys_processed')
self._sequence_key = sequence_key
self._add_context_features = add_context_features
self._pad_context_features = pad_context_features
self._output_type = output_type
self._max_clip_length = max_clip_length
if six.ensure_str(image_ids_to_keep) == 'All':
self._image_ids_to_keep = None
else:
with tf.io.gfile.GFile(image_ids_to_keep) as f:
self._image_ids_to_keep = json.load(f)
self._keep_context_features_image_id_list = (
keep_context_features_image_id_list)
self._subsample_context_features_rate = subsample_context_features_rate
self._keep_only_positives = keep_only_positives
self._keep_only_positives_gt = keep_only_positives_gt
self._context_features_score_threshold = context_features_score_threshold
self._max_num_elements_in_context_features = (
max_num_elements_in_context_features)
self._context_feature_length = context_feature_length
self._images_kept = beam.metrics.Metrics.counter(
'sequence_data_generation', 'images_kept')
self._images_loaded = beam.metrics.Metrics.counter(
'sequence_data_generation', 'images_loaded')
def process(self, grouped_entry):
return self._add_context_to_example(copy.deepcopy(grouped_entry))
def _build_context_features(self, example_list):
context_features = []
context_features_image_id_list = []
count = 0
example_embedding = []
for idx, example in enumerate(example_list):
if self._subsample_context_features_rate > 0:
if (idx % self._subsample_context_features_rate) != 0:
example.features.feature[
'context_features_idx'].int64_list.value.append(
self._max_num_elements_in_context_features + 1)
continue
if self._keep_only_positives:
if example.features.feature[
'image/embedding_score'
].float_list.value[0] < self._context_features_score_threshold:
example.features.feature[
'context_features_idx'].int64_list.value.append(
self._max_num_elements_in_context_features + 1)
continue
if self._keep_only_positives_gt:
if len(example.features.feature[
'image/object/bbox/xmin'
].float_list.value) < 1:
example.features.feature[
'context_features_idx'].int64_list.value.append(
self._max_num_elements_in_context_features + 1)
continue
example_embedding = list(example.features.feature[
'image/embedding'].float_list.value)
context_features.extend(example_embedding)
num_embeddings = example.features.feature[
'image/embedding_count'].int64_list.value[0]
example_image_id = example.features.feature[
'image/source_id'].bytes_list.value[0]
for _ in range(num_embeddings):
example.features.feature[
'context_features_idx'].int64_list.value.append(count)
count += 1
context_features_image_id_list.append(example_image_id)
if not example_embedding:
example_embedding.append(np.zeros(self._context_feature_length))
feature_length = self._context_feature_length
# If the example_list is not empty and image/embedding_length is in the
    # feature dict, feature_length will be assigned to that. Otherwise, it will
# be kept as default.
if example_list and (
'image/embedding_length' in example_list[0].features.feature):
feature_length = example_list[0].features.feature[
'image/embedding_length'].int64_list.value[0]
if self._pad_context_features:
while len(context_features_image_id_list) < (
self._max_num_elements_in_context_features):
        context_features_image_id_list.append(b'')
return context_features, feature_length, context_features_image_id_list
def _add_context_to_example(self, grouped_entry):
key, example_collection = grouped_entry
list_of_examples = []
example_list = list(example_collection)
if self._add_context_features:
context_features, feature_length, context_features_image_id_list = (
self._build_context_features(example_list))
if self._image_ids_to_keep is not None:
new_example_list = []
for example in example_list:
im_id = example.features.feature['image/source_id'].bytes_list.value[0]
self._images_loaded.inc(1)
if six.ensure_str(im_id) in self._image_ids_to_keep:
self._images_kept.inc(1)
new_example_list.append(example)
if new_example_list:
example_list = new_example_list
else:
return []
if self._output_type == 'tf_sequence_example':
if self._max_clip_length is not None:
# For now, no overlap
clips = get_sliding_window(
example_list, self._max_clip_length, self._max_clip_length)
else:
clips = [example_list]
for clip_num, clip_list in enumerate(clips):
# initialize sequence example
seq_example = tf.train.SequenceExample()
video_id = six.ensure_str(key)+'_'+ str(clip_num)
seq_example.context.feature['clip/media_id'].bytes_list.value.append(
video_id.encode('utf8'))
seq_example.context.feature['clip/frames'].int64_list.value.append(
len(clip_list))
seq_example.context.feature[
'clip/start/timestamp'].int64_list.value.append(0)
seq_example.context.feature[
'clip/end/timestamp'].int64_list.value.append(len(clip_list))
seq_example.context.feature['image/format'].bytes_list.value.append(
six.ensure_binary('JPG'))
seq_example.context.feature['image/channels'].int64_list.value.append(3)
context_example = clip_list[0]
seq_example.context.feature['image/height'].int64_list.value.append(
context_example.features.feature[
'image/height'].int64_list.value[0])
seq_example.context.feature['image/width'].int64_list.value.append(
context_example.features.feature['image/width'].int64_list.value[0])
seq_example.context.feature[
'image/context_feature_length'].int64_list.value.append(
feature_length)
seq_example.context.feature[
'image/context_features'].float_list.value.extend(
context_features)
if self._keep_context_features_image_id_list:
seq_example.context.feature[
'image/context_features_image_id_list'].bytes_list.value.extend(
context_features_image_id_list)
encoded_image_list = seq_example.feature_lists.feature_list[
'image/encoded']
timestamps_list = seq_example.feature_lists.feature_list[
'image/timestamp']
context_features_idx_list = seq_example.feature_lists.feature_list[
'image/context_features_idx']
date_captured_list = seq_example.feature_lists.feature_list[
'image/date_captured']
unix_time_list = seq_example.feature_lists.feature_list[
'image/unix_time']
location_list = seq_example.feature_lists.feature_list['image/location']
image_ids_list = seq_example.feature_lists.feature_list[
'image/source_id']
gt_xmin_list = seq_example.feature_lists.feature_list[
'region/bbox/xmin']
gt_xmax_list = seq_example.feature_lists.feature_list[
'region/bbox/xmax']
gt_ymin_list = seq_example.feature_lists.feature_list[
'region/bbox/ymin']
gt_ymax_list = seq_example.feature_lists.feature_list[
'region/bbox/ymax']
gt_type_list = seq_example.feature_lists.feature_list[
'region/label/index']
gt_type_string_list = seq_example.feature_lists.feature_list[
'region/label/string']
gt_is_annotated_list = seq_example.feature_lists.feature_list[
'region/is_annotated']
for idx, example in enumerate(clip_list):
encoded_image = encoded_image_list.feature.add()
encoded_image.bytes_list.value.extend(
example.features.feature['image/encoded'].bytes_list.value)
image_id = image_ids_list.feature.add()
image_id.bytes_list.value.append(
example.features.feature['image/source_id'].bytes_list.value[0])
timestamp = timestamps_list.feature.add()
          # The timestamp is currently the frame's index within the clip list.
timestamp.int64_list.value.extend([idx])
context_features_idx = context_features_idx_list.feature.add()
context_features_idx.int64_list.value.extend(
example.features.feature['context_features_idx'].int64_list.value)
date_captured = date_captured_list.feature.add()
date_captured.bytes_list.value.extend(
example.features.feature['image/date_captured'].bytes_list.value)
unix_time = unix_time_list.feature.add()
unix_time.float_list.value.extend(
example.features.feature['image/unix_time'].float_list.value)
location = location_list.feature.add()
location.bytes_list.value.extend(
example.features.feature['image/location'].bytes_list.value)
gt_xmin = gt_xmin_list.feature.add()
gt_xmax = gt_xmax_list.feature.add()
gt_ymin = gt_ymin_list.feature.add()
gt_ymax = gt_ymax_list.feature.add()
gt_type = gt_type_list.feature.add()
gt_type_str = gt_type_string_list.feature.add()
gt_is_annotated = gt_is_annotated_list.feature.add()
gt_is_annotated.int64_list.value.append(1)
gt_xmin.float_list.value.extend(
example.features.feature[
'image/object/bbox/xmin'].float_list.value)
gt_xmax.float_list.value.extend(
example.features.feature[
'image/object/bbox/xmax'].float_list.value)
gt_ymin.float_list.value.extend(
example.features.feature[
'image/object/bbox/ymin'].float_list.value)
gt_ymax.float_list.value.extend(
example.features.feature[
'image/object/bbox/ymax'].float_list.value)
gt_type.int64_list.value.extend(
example.features.feature[
'image/object/class/label'].int64_list.value)
gt_type_str.bytes_list.value.extend(
example.features.feature[
'image/object/class/text'].bytes_list.value)
self._num_examples_processed.inc(1)
list_of_examples.append(seq_example)
elif self._output_type == 'tf_example':
for example in example_list:
im_id = example.features.feature['image/source_id'].bytes_list.value[0]
if self._add_context_features:
example.features.feature[
'image/context_features'].float_list.value.extend(
context_features)
example.features.feature[
'image/context_feature_length'].int64_list.value.append(
feature_length)
if self._keep_context_features_image_id_list:
example.features.feature[
'image/context_features_image_id_list'].bytes_list.value.extend(
context_features_image_id_list)
self._num_examples_processed.inc(1)
list_of_examples.append(example)
return list_of_examples
def construct_pipeline(pipeline,
input_tfrecord,
output_tfrecord,
sequence_key,
time_horizon=None,
subsample_context_features_rate=0,
reduce_image_size=True,
max_image_dimension=1024,
add_context_features=True,
sorted_image_ids=True,
image_ids_to_keep='All',
keep_context_features_image_id_list=False,
keep_only_positives=False,
context_features_score_threshold=0.7,
keep_only_positives_gt=False,
max_num_elements_in_context_features=5000,
num_shards=0,
output_type='tf_example',
max_clip_length=None,
context_feature_length=2057):
"""Returns a beam pipeline to run object detection inference.
Args:
pipeline: Initialized beam pipeline.
input_tfrecord: An TFRecord of tf.train.Example protos containing images.
output_tfrecord: An TFRecord of tf.train.Example protos that contain images
in the input TFRecord and the detections from the model.
sequence_key: A feature name to use as a key for grouping sequences.
time_horizon: What length of time to use to partition the data when building
the memory banks. Options: `year`, `month`, `week`, `day `, `hour`,
`minute`, None.
subsample_context_features_rate: What rate to subsample images for the
contextual memory bank.
reduce_image_size: Whether to reduce the size of the stored images.
max_image_dimension: The maximum image dimension to use for resizing.
add_context_features: Whether to keep and store the contextual memory bank.
sorted_image_ids: Whether the image ids are sortable, and can be used as
datetime tie-breakers when building memory banks.
image_ids_to_keep: A list of image ids to save, to use to build data subsets
for evaluation.
keep_context_features_image_id_list: Whether to save an ordered list of the
ids of the images in the contextual memory bank.
keep_only_positives: Whether to only keep high scoring
(>context_features_score_threshold) features in the contextual memory
bank.
context_features_score_threshold: What threshold to use for keeping
features.
keep_only_positives_gt: Whether to only keep features from images that
contain objects based on the ground truth (for training).
max_num_elements_in_context_features: the maximum number of elements in the
memory bank
num_shards: The number of output shards.
  output_type: What type of output, one of `tf_example` or
    `tf_sequence_example`.
  max_clip_length: The maximum length of a sequence example, before
    splitting into multiple sequence examples.
context_feature_length: The length of the context feature embeddings stored
in the input data.
"""
if output_type == 'tf_example':
coder = beam.coders.ProtoCoder(tf.train.Example)
elif output_type == 'tf_sequence_example':
coder = beam.coders.ProtoCoder(tf.train.SequenceExample)
else:
raise ValueError('Unsupported output type.')
input_collection = (
pipeline | 'ReadInputTFRecord' >> beam.io.tfrecordio.ReadFromTFRecord(
input_tfrecord,
coder=beam.coders.BytesCoder()))
rekey_collection = input_collection | 'RekeyExamples' >> beam.ParDo(
ReKeyDataFn(sequence_key, time_horizon,
reduce_image_size, max_image_dimension))
grouped_collection = (
rekey_collection | 'GroupBySequenceKey' >> beam.GroupByKey())
grouped_collection = (
grouped_collection | 'ReshuffleGroups' >> beam.Reshuffle())
ordered_collection = (
grouped_collection | 'OrderByFrameNumber' >> beam.ParDo(
SortGroupedDataFn(sequence_key, sorted_image_ids,
max_num_elements_in_context_features)))
ordered_collection = (
ordered_collection | 'ReshuffleSortedGroups' >> beam.Reshuffle())
output_collection = (
ordered_collection | 'AddContextToExamples' >> beam.ParDo(
GenerateContextFn(
sequence_key, add_context_features, image_ids_to_keep,
keep_context_features_image_id_list=(
keep_context_features_image_id_list),
subsample_context_features_rate=subsample_context_features_rate,
keep_only_positives=keep_only_positives,
keep_only_positives_gt=keep_only_positives_gt,
context_features_score_threshold=(
context_features_score_threshold),
max_num_elements_in_context_features=(
max_num_elements_in_context_features),
output_type=output_type,
max_clip_length=max_clip_length,
context_feature_length=context_feature_length)))
output_collection = (
output_collection | 'ReshuffleExamples' >> beam.Reshuffle())
_ = output_collection | 'WritetoDisk' >> beam.io.tfrecordio.WriteToTFRecord(
output_tfrecord,
num_shards=num_shards,
coder=coder)
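# A minimal sketch of driving construct_pipeline directly (mirroring main()
# below); the paths and argument values here are illustrative placeholders,
# not values from this repository:
#
#   options = beam.options.pipeline_options.PipelineOptions(
#       runner='DirectRunner')
#   p = beam.Pipeline(options=options)
#   construct_pipeline(
#       p, '/tmp/embeddings/input.tfrecord', '/tmp/context/output',
#       sequence_key='image/location', time_horizon='month',
#       output_type='tf_example')
#   p.run()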
def parse_args(argv):
"""Command-line argument parser.
Args:
argv: command line arguments
Returns:
beam_args: Arguments for the beam pipeline.
pipeline_args: Arguments for the pipeline options, such as runner type.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'--input_tfrecord',
dest='input_tfrecord',
required=True,
help='TFRecord containing images in tf.Example format for object '
'detection, with bounding boxes and contextual feature embeddings.')
parser.add_argument(
'--output_tfrecord',
dest='output_tfrecord',
required=True,
help='TFRecord containing images in tf.Example format, with added '
'contextual memory banks.')
parser.add_argument(
'--sequence_key',
dest='sequence_key',
default='image/location',
help='Key to use when grouping sequences: so far supports `image/seq_id` '
'and `image/location`.')
parser.add_argument(
'--context_feature_length',
dest='context_feature_length',
default=2057,
help='The length of the context feature embeddings stored in the input '
'data.')
parser.add_argument(
'--time_horizon',
dest='time_horizon',
default=None,
help='What time horizon to use when splitting the data, if any. Options '
      'are: `year`, `month`, `week`, `day`, `hour`, `minute`, `None`.')
parser.add_argument(
'--subsample_context_features_rate',
dest='subsample_context_features_rate',
default=0,
help='Whether to subsample the context_features, and if so how many to '
'sample. If the rate is set to X, it will sample context from 1 out of '
'every X images. Default is sampling from every image, which is X=0.')
parser.add_argument(
'--reduce_image_size',
dest='reduce_image_size',
default=True,
help='downsamples images to have longest side max_image_dimension, '
'maintaining aspect ratio')
parser.add_argument(
'--max_image_dimension',
dest='max_image_dimension',
default=1024,
help='Sets max image dimension for resizing.')
parser.add_argument(
'--add_context_features',
dest='add_context_features',
default=True,
help='Adds a memory bank of embeddings to each clip')
parser.add_argument(
'--sorted_image_ids',
dest='sorted_image_ids',
default=True,
help='Whether the image source_ids are sortable to deal with '
'date_captured tie-breaks.')
parser.add_argument(
'--image_ids_to_keep',
dest='image_ids_to_keep',
default='All',
help='Path to .json list of image ids to keep, used for ground truth '
'eval creation.')
parser.add_argument(
'--keep_context_features_image_id_list',
dest='keep_context_features_image_id_list',
default=False,
help='Whether or not to keep a list of the image_ids corresponding to '
'the memory bank.')
parser.add_argument(
'--keep_only_positives',
dest='keep_only_positives',
default=False,
help='Whether or not to keep only positive boxes based on score.')
parser.add_argument(
'--context_features_score_threshold',
dest='context_features_score_threshold',
default=0.7,
help='What score threshold to use for boxes in context_features, when '
'`keep_only_positives` is set to `True`.')
parser.add_argument(
'--keep_only_positives_gt',
dest='keep_only_positives_gt',
default=False,
help='Whether or not to keep only positive boxes based on gt class.')
parser.add_argument(
'--max_num_elements_in_context_features',
dest='max_num_elements_in_context_features',
default=2000,
help='Sets max number of context feature elements per memory bank. '
'If the number of images in the context group is greater than '
'`max_num_elements_in_context_features`, the context group will be split.'
)
parser.add_argument(
'--output_type',
dest='output_type',
default='tf_example',
help='Output type, one of `tf_example`, `tf_sequence_example`.')
parser.add_argument(
'--max_clip_length',
dest='max_clip_length',
default=None,
help='Max length for sequence example outputs.')
parser.add_argument(
'--num_shards',
dest='num_shards',
default=0,
help='Number of output shards.')
beam_args, pipeline_args = parser.parse_known_args(argv)
return beam_args, pipeline_args
def main(argv=None, save_main_session=True):
"""Runs the Beam pipeline that performs inference.
Args:
argv: Command line arguments.
save_main_session: Whether to save the main session.
"""
args, pipeline_args = parse_args(argv)
pipeline_options = beam.options.pipeline_options.PipelineOptions(
pipeline_args)
pipeline_options.view_as(
beam.options.pipeline_options.SetupOptions).save_main_session = (
save_main_session)
dirname = os.path.dirname(args.output_tfrecord)
tf.io.gfile.makedirs(dirname)
p = beam.Pipeline(options=pipeline_options)
construct_pipeline(
p,
args.input_tfrecord,
args.output_tfrecord,
args.sequence_key,
args.time_horizon,
args.subsample_context_features_rate,
args.reduce_image_size,
args.max_image_dimension,
args.add_context_features,
args.sorted_image_ids,
args.image_ids_to_keep,
args.keep_context_features_image_id_list,
args.keep_only_positives,
args.context_features_score_threshold,
args.keep_only_positives_gt,
args.max_num_elements_in_context_features,
args.num_shards,
args.output_type,
args.max_clip_length,
args.context_feature_length)
p.run()
if __name__ == '__main__':
main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/dataset_tools/context_rcnn/add_context_to_examples.py | add_context_to_examples.py |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""A Beam job to generate embedding data for camera trap images.
This tool runs inference with an exported Object Detection model in
`saved_model` format and produces raw embeddings for camera trap data. These
embeddings contain an object-centric feature embedding from Faster R-CNN, the
datetime that the image was taken (normalized in a specific way), and the
position of the object of interest. By default, only the highest-scoring object
embedding is included.
Steps to generate a embedding dataset:
1. Use object_detection/export_inference_graph.py to get a Faster R-CNN
`saved_model` for inference. The input node must accept a tf.Example proto.
2. Run this tool with `saved_model` from step 1 and an TFRecord of tf.Example
protos containing images for inference.
Example Usage:
--------------
python tensorflow_models/object_detection/export_inference_graph.py \
--alsologtostderr \
--input_type tf_example \
--pipeline_config_path path/to/faster_rcnn_model.config \
--trained_checkpoint_prefix path/to/model.ckpt \
--output_directory path/to/exported_model_directory \
--additional_output_tensor_names detection_features
python generate_embedding_data.py \
--alsologtostderr \
--embedding_input_tfrecord path/to/input_tfrecords* \
--embedding_output_tfrecord path/to/output_tfrecords \
--embedding_model_dir path/to/exported_model_directory/saved_model
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import datetime
import os
import threading
import numpy as np
import six
import tensorflow as tf
try:
import apache_beam as beam # pylint:disable=g-import-not-at-top
except ModuleNotFoundError:
pass
def add_keys(serialized_example):
  """Returns a (hash key, serialized example) tuple for keyed processing."""
  key = hash(serialized_example)
  return key, serialized_example
def drop_keys(key_value_tuple):
  """Drops the key added by add_keys and returns only the value."""
  return key_value_tuple[1]
def get_date_captured(example):
  """Parses the 'image/date_captured' feature of `example` into a datetime."""
  date_captured = datetime.datetime.strptime(
      six.ensure_str(
          example.features.feature['image/date_captured'].bytes_list.value[0]),
      '%Y-%m-%d %H:%M:%S')
  return date_captured
def embed_date_captured(date_captured):
"""Encodes the datetime of the image."""
embedded_date_captured = []
month_max = 12.0
day_max = 31.0
hour_max = 24.0
minute_max = 60.0
min_year = 1990.0
max_year = 2030.0
year = (date_captured.year - min_year) / float(max_year - min_year)
embedded_date_captured.append(year)
month = (date_captured.month - 1) / month_max
embedded_date_captured.append(month)
day = (date_captured.day - 1) / day_max
embedded_date_captured.append(day)
hour = date_captured.hour / hour_max
embedded_date_captured.append(hour)
minute = date_captured.minute / minute_max
embedded_date_captured.append(minute)
return np.asarray(embedded_date_captured)
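# Worked example for the normalization above (hypothetical capture time):
# datetime.datetime(2020, 7, 15, 12, 30, 0) maps to
#   year   = (2020 - 1990) / (2030 - 1990) = 0.75
#   month  = (7 - 1) / 12.0                = 0.5
#   day    = (15 - 1) / 31.0               ~= 0.452
#   hour   = 12 / 24.0                     = 0.5
#   minute = 30 / 60.0                     = 0.5
# i.e. a length-5 vector with each component scaled into [0, 1).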
def embed_position_and_size(box):
"""Encodes the bounding box of the object of interest."""
ymin = box[0]
xmin = box[1]
ymax = box[2]
xmax = box[3]
w = xmax - xmin
h = ymax - ymin
x = xmin + w / 2.0
y = ymin + h / 2.0
return np.asarray([x, y, w, h])
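# Worked example (hypothetical box in normalized [ymin, xmin, ymax, xmax]):
# box = [0.4, 0.0, 0.6, 0.2] gives w = 0.2, h = 0.2, x = 0.0 + 0.2 / 2 = 0.1,
# y = 0.4 + 0.2 / 2 = 0.5, i.e. np.asarray([0.1, 0.5, 0.2, 0.2]) in
# [center_x, center_y, width, height] order.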
def get_bb_embedding(detection_features, detection_boxes, detection_scores,
                     index):
  """Returns (embedding, score) for the detection at `index`.
  The per-box feature map is average-pooled over its spatial dimensions and
  concatenated with the 4-d position/size embedding of the box.
  """
  embedding = detection_features[0][index]
  # Average-pool the spatial dimensions of the box feature map.
  pooled_embedding = np.mean(np.mean(embedding, axis=1), axis=0)
  box = detection_boxes[0][index]
  position_embedding = embed_position_and_size(box)
  score = detection_scores[0][index]
  return np.concatenate((pooled_embedding, position_embedding)), score
class GenerateEmbeddingDataFn(beam.DoFn):
"""Generates embedding data for camera trap images.
This Beam DoFn performs inference with an object detection `saved_model` and
produces contextual embedding vectors.
"""
session_lock = threading.Lock()
def __init__(self, model_dir, top_k_embedding_count,
bottom_k_embedding_count, embedding_type='final_box_features'):
"""Initialization function.
Args:
      model_dir: A directory containing the saved model.
      top_k_embedding_count: The number of high-confidence embeddings to store.
      bottom_k_embedding_count: The number of low-confidence embeddings to
        store.
      embedding_type: One of 'final_box_features', 'rpn_box_features'.
"""
self._model_dir = model_dir
self._session = None
self._num_examples_processed = beam.metrics.Metrics.counter(
'embedding_data_generation', 'num_tf_examples_processed')
self._top_k_embedding_count = top_k_embedding_count
self._bottom_k_embedding_count = bottom_k_embedding_count
self._embedding_type = embedding_type
def setup(self):
self._load_inference_model()
def _load_inference_model(self):
    # Loading the saved model is expensive, so the class-level lock is used to
    # serialize model loading across the worker's threads.
with self.session_lock:
self._detect_fn = tf.saved_model.load(self._model_dir)
def process(self, tfexample_key_value):
return self._run_inference_and_generate_embedding(tfexample_key_value)
def _run_inference_and_generate_embedding(self, tfexample_key_value):
key, tfexample = tfexample_key_value
input_example = tf.train.Example.FromString(tfexample)
example = tf.train.Example()
example.CopyFrom(input_example)
try:
date_captured = get_date_captured(input_example)
unix_time = ((date_captured -
datetime.datetime.fromtimestamp(0)).total_seconds())
example.features.feature['image/unix_time'].float_list.value.extend(
[unix_time])
temporal_embedding = embed_date_captured(date_captured)
except Exception: # pylint: disable=broad-except
temporal_embedding = None
detections = self._detect_fn.signatures['serving_default'](
(tf.expand_dims(tf.convert_to_tensor(tfexample), 0)))
if self._embedding_type == 'final_box_features':
detection_features = detections['detection_features']
elif self._embedding_type == 'rpn_box_features':
detection_features = detections['cropped_rpn_box_features']
else:
raise ValueError('embedding type not supported')
detection_boxes = detections['detection_boxes']
num_detections = detections['num_detections']
detection_scores = detections['detection_scores']
num_detections = int(num_detections)
embed_all = []
score_all = []
detection_features = np.asarray(detection_features)
embedding_count = 0
for index in range(min(num_detections, self._top_k_embedding_count)):
bb_embedding, score = get_bb_embedding(
detection_features, detection_boxes, detection_scores, index)
embed_all.extend(bb_embedding)
if temporal_embedding is not None: embed_all.extend(temporal_embedding)
score_all.append(score)
embedding_count += 1
for index in range(
max(0, num_detections - 1),
max(-1, num_detections - 1 - self._bottom_k_embedding_count), -1):
bb_embedding, score = get_bb_embedding(
detection_features, detection_boxes, detection_scores, index)
embed_all.extend(bb_embedding)
if temporal_embedding is not None: embed_all.extend(temporal_embedding)
score_all.append(score)
embedding_count += 1
if embedding_count == 0:
bb_embedding, score = get_bb_embedding(
detection_features, detection_boxes, detection_scores, 0)
embed_all.extend(bb_embedding)
if temporal_embedding is not None: embed_all.extend(temporal_embedding)
score_all.append(score)
# Takes max in case embedding_count is 0.
embedding_length = len(embed_all) // max(1, embedding_count)
embed_all = np.asarray(embed_all)
example.features.feature['image/embedding'].float_list.value.extend(
embed_all)
example.features.feature['image/embedding_score'].float_list.value.extend(
score_all)
example.features.feature['image/embedding_length'].int64_list.value.append(
embedding_length)
example.features.feature['image/embedding_count'].int64_list.value.append(
embedding_count)
self._num_examples_processed.inc(1)
return [(key, example)]
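# Layout of the tf.Example features written by GenerateEmbeddingDataFn above:
# for each selected detection, the per-box vector is the spatially pooled
# feature embedding followed by the 4-d position/size embedding and, when the
# capture date parses, the 5-d temporal embedding; the per-box vectors are
# concatenated into 'image/embedding', with scores in 'image/embedding_score',
# the per-box vector length in 'image/embedding_length' and the number of
# selected boxes in 'image/embedding_count'.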
def construct_pipeline(pipeline, input_tfrecord, output_tfrecord, model_dir,
top_k_embedding_count, bottom_k_embedding_count,
num_shards, embedding_type):
"""Returns a beam pipeline to run object detection inference.
Args:
pipeline: Initialized beam pipeline.
    input_tfrecord: A TFRecord of tf.train.Example protos containing images.
    output_tfrecord: A TFRecord of tf.train.Example protos that contain the
      images from the input TFRecord plus the embeddings generated by the
      model.
model_dir: Path to `saved_model` to use for inference.
top_k_embedding_count: The number of high-confidence embeddings to store.
bottom_k_embedding_count: The number of low-confidence embeddings to store.
num_shards: The number of output shards.
embedding_type: Which features to embed.
"""
input_collection = (
pipeline | 'ReadInputTFRecord' >> beam.io.tfrecordio.ReadFromTFRecord(
input_tfrecord, coder=beam.coders.BytesCoder())
| 'AddKeys' >> beam.Map(add_keys))
output_collection = input_collection | 'ExtractEmbedding' >> beam.ParDo(
GenerateEmbeddingDataFn(model_dir, top_k_embedding_count,
bottom_k_embedding_count, embedding_type))
output_collection = output_collection | 'Reshuffle' >> beam.Reshuffle()
_ = output_collection | 'DropKeys' >> beam.Map(
drop_keys) | 'WritetoDisk' >> beam.io.tfrecordio.WriteToTFRecord(
output_tfrecord,
num_shards=num_shards,
coder=beam.coders.ProtoCoder(tf.train.Example))
def parse_args(argv):
"""Command-line argument parser.
Args:
argv: command line arguments
Returns:
beam_args: Arguments for the beam pipeline.
pipeline_args: Arguments for the pipeline options, such as runner type.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'--embedding_input_tfrecord',
dest='embedding_input_tfrecord',
required=True,
help='TFRecord containing images in tf.Example format for object '
'detection.')
parser.add_argument(
'--embedding_output_tfrecord',
dest='embedding_output_tfrecord',
required=True,
help='TFRecord containing embeddings in tf.Example format.')
parser.add_argument(
'--embedding_model_dir',
dest='embedding_model_dir',
required=True,
      help='Path to directory containing an object detection SavedModel with '
      'detection_box_classifier_features in the output.')
parser.add_argument(
'--top_k_embedding_count',
dest='top_k_embedding_count',
default=1,
help='The number of top k embeddings to add to the memory bank.')
parser.add_argument(
'--bottom_k_embedding_count',
dest='bottom_k_embedding_count',
default=0,
help='The number of bottom k embeddings to add to the memory bank.')
parser.add_argument(
'--num_shards',
dest='num_shards',
default=0,
help='Number of output shards.')
parser.add_argument(
'--embedding_type',
dest='embedding_type',
default='final_box_features',
help='What features to embed, supports `final_box_features`, '
'`rpn_box_features`.')
beam_args, pipeline_args = parser.parse_known_args(argv)
return beam_args, pipeline_args
def main(argv=None, save_main_session=True):
"""Runs the Beam pipeline that performs inference.
Args:
argv: Command line arguments.
save_main_session: Whether to save the main session.
"""
args, pipeline_args = parse_args(argv)
pipeline_options = beam.options.pipeline_options.PipelineOptions(
pipeline_args)
pipeline_options.view_as(
beam.options.pipeline_options.SetupOptions).save_main_session = (
save_main_session)
dirname = os.path.dirname(args.embedding_output_tfrecord)
tf.io.gfile.makedirs(dirname)
p = beam.Pipeline(options=pipeline_options)
construct_pipeline(
p,
args.embedding_input_tfrecord,
args.embedding_output_tfrecord,
args.embedding_model_dir,
args.top_k_embedding_count,
args.bottom_k_embedding_count,
args.num_shards,
args.embedding_type)
p.run()
if __name__ == '__main__':
main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/dataset_tools/context_rcnn/generate_embedding_data.py | generate_embedding_data.py |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for add_context_to_examples."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import datetime
import os
import tempfile
import unittest
import numpy as np
import six
import tensorflow as tf
from object_detection.utils import tf_version
if tf_version.is_tf2():
from object_detection.dataset_tools.context_rcnn import add_context_to_examples # pylint:disable=g-import-not-at-top
try:
import apache_beam as beam # pylint:disable=g-import-not-at-top
except ModuleNotFoundError:
pass
@contextlib.contextmanager
def InMemoryTFRecord(entries):
temp = tempfile.NamedTemporaryFile(delete=False)
filename = temp.name
try:
with tf.io.TFRecordWriter(filename) as writer:
for value in entries:
writer.write(value)
yield filename
finally:
os.unlink(temp.name)
def BytesFeature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def BytesListFeature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))
def Int64Feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def Int64ListFeature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def FloatListFeature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class GenerateContextDataTest(tf.test.TestCase):
def _create_first_tf_example(self):
encoded_image = tf.io.encode_jpeg(
tf.constant(np.ones((4, 4, 3)).astype(np.uint8))).numpy()
example = tf.train.Example(features=tf.train.Features(feature={
'image/encoded': BytesFeature(encoded_image),
'image/source_id': BytesFeature(six.ensure_binary('image_id_1')),
'image/height': Int64Feature(4),
'image/width': Int64Feature(4),
'image/object/class/label': Int64ListFeature([5, 5]),
'image/object/class/text': BytesListFeature([six.ensure_binary('hyena'),
six.ensure_binary('hyena')
]),
'image/object/bbox/xmin': FloatListFeature([0.0, 0.1]),
'image/object/bbox/xmax': FloatListFeature([0.2, 0.3]),
'image/object/bbox/ymin': FloatListFeature([0.4, 0.5]),
'image/object/bbox/ymax': FloatListFeature([0.6, 0.7]),
'image/seq_id': BytesFeature(six.ensure_binary('01')),
'image/seq_num_frames': Int64Feature(2),
'image/seq_frame_num': Int64Feature(0),
'image/date_captured': BytesFeature(
six.ensure_binary(str(datetime.datetime(2020, 1, 1, 1, 0, 0)))),
'image/embedding': FloatListFeature([0.1, 0.2, 0.3]),
'image/embedding_score': FloatListFeature([0.9]),
'image/embedding_length': Int64Feature(3),
'image/embedding_count': Int64Feature(1)
}))
return example.SerializeToString()
def _create_second_tf_example(self):
encoded_image = tf.io.encode_jpeg(
tf.constant(np.ones((4, 4, 3)).astype(np.uint8))).numpy()
example = tf.train.Example(features=tf.train.Features(feature={
'image/encoded': BytesFeature(encoded_image),
'image/source_id': BytesFeature(six.ensure_binary('image_id_2')),
'image/height': Int64Feature(4),
'image/width': Int64Feature(4),
'image/object/class/label': Int64ListFeature([5]),
'image/object/class/text': BytesListFeature([six.ensure_binary('hyena')
]),
'image/object/bbox/xmin': FloatListFeature([0.0]),
'image/object/bbox/xmax': FloatListFeature([0.1]),
'image/object/bbox/ymin': FloatListFeature([0.2]),
'image/object/bbox/ymax': FloatListFeature([0.3]),
'image/seq_id': BytesFeature(six.ensure_binary('01')),
'image/seq_num_frames': Int64Feature(2),
'image/seq_frame_num': Int64Feature(1),
'image/date_captured': BytesFeature(
six.ensure_binary(str(datetime.datetime(2020, 1, 1, 1, 1, 0)))),
'image/embedding': FloatListFeature([0.4, 0.5, 0.6]),
'image/embedding_score': FloatListFeature([0.9]),
'image/embedding_length': Int64Feature(3),
'image/embedding_count': Int64Feature(1)
}))
return example.SerializeToString()
def assert_expected_examples(self, tf_example_list):
self.assertAllEqual(
{tf_example.features.feature['image/source_id'].bytes_list.value[0]
for tf_example in tf_example_list},
{six.ensure_binary('image_id_1'), six.ensure_binary('image_id_2')})
self.assertAllClose(
tf_example_list[0].features.feature[
'image/context_features'].float_list.value,
[0.1, 0.2, 0.3, 0.4, 0.5, 0.6])
self.assertAllClose(
tf_example_list[1].features.feature[
'image/context_features'].float_list.value,
[0.1, 0.2, 0.3, 0.4, 0.5, 0.6])
def assert_expected_sequence_example(self, tf_sequence_example_list):
tf_sequence_example = tf_sequence_example_list[0]
num_frames = 2
self.assertAllEqual(
tf_sequence_example.context.feature[
'clip/media_id'].bytes_list.value[0], six.ensure_binary(
'01_0'))
self.assertAllClose(
tf_sequence_example.context.feature[
'image/context_features'].float_list.value,
[0.1, 0.2, 0.3, 0.4, 0.5, 0.6])
seq_feature_dict = tf_sequence_example.feature_lists.feature_list
self.assertLen(
seq_feature_dict['image/encoded'].feature[:],
num_frames)
actual_timestamps = [
feature.int64_list.value[0] for feature
in seq_feature_dict['image/timestamp'].feature]
timestamps = [0, 1]
self.assertAllEqual(timestamps, actual_timestamps)
# First image.
self.assertAllClose(
[0.4, 0.5],
seq_feature_dict['region/bbox/ymin'].feature[0].float_list.value[:])
self.assertAllClose(
[0.0, 0.1],
seq_feature_dict['region/bbox/xmin'].feature[0].float_list.value[:])
self.assertAllClose(
[0.6, 0.7],
seq_feature_dict['region/bbox/ymax'].feature[0].float_list.value[:])
self.assertAllClose(
[0.2, 0.3],
seq_feature_dict['region/bbox/xmax'].feature[0].float_list.value[:])
self.assertAllEqual(
[six.ensure_binary('hyena'), six.ensure_binary('hyena')],
seq_feature_dict['region/label/string'].feature[0].bytes_list.value[:])
# Second example.
self.assertAllClose(
[0.2],
seq_feature_dict['region/bbox/ymin'].feature[1].float_list.value[:])
self.assertAllClose(
[0.0],
seq_feature_dict['region/bbox/xmin'].feature[1].float_list.value[:])
self.assertAllClose(
[0.3],
seq_feature_dict['region/bbox/ymax'].feature[1].float_list.value[:])
self.assertAllClose(
[0.1],
seq_feature_dict['region/bbox/xmax'].feature[1].float_list.value[:])
self.assertAllEqual(
[six.ensure_binary('hyena')],
seq_feature_dict['region/label/string'].feature[1].bytes_list.value[:])
def assert_expected_key(self, key):
self.assertAllEqual(key, b'01')
def assert_sorted(self, example_collection):
example_list = list(example_collection)
counter = 0
for example in example_list:
frame_num = example.features.feature[
'image/seq_frame_num'].int64_list.value[0]
self.assertGreaterEqual(frame_num, counter)
counter = frame_num
def assert_context(self, example_collection):
example_list = list(example_collection)
for example in example_list:
context = example.features.feature[
'image/context_features'].float_list.value
self.assertAllClose([0.1, 0.2, 0.3, 0.4, 0.5, 0.6], context)
def assert_resized(self, example):
width = example.features.feature['image/width'].int64_list.value[0]
self.assertAllEqual(width, 2)
height = example.features.feature['image/height'].int64_list.value[0]
self.assertAllEqual(height, 2)
def assert_size(self, example):
width = example.features.feature['image/width'].int64_list.value[0]
self.assertAllEqual(width, 4)
height = example.features.feature['image/height'].int64_list.value[0]
self.assertAllEqual(height, 4)
def test_sliding_window(self):
example_list = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
max_clip_length = 3
stride_length = 3
out_list = [list(i) for i in add_context_to_examples.get_sliding_window(
example_list, max_clip_length, stride_length)]
self.assertAllEqual(out_list, [['a', 'b', 'c'],
['d', 'e', 'f'],
['g']])
def test_rekey_data_fn(self):
sequence_key = 'image/seq_id'
time_horizon = None
reduce_image_size = False
max_dim = None
rekey_fn = add_context_to_examples.ReKeyDataFn(
sequence_key, time_horizon,
reduce_image_size, max_dim)
output = rekey_fn.process(self._create_first_tf_example())
self.assert_expected_key(output[0][0])
self.assert_size(output[0][1])
def test_rekey_data_fn_w_resize(self):
sequence_key = 'image/seq_id'
time_horizon = None
reduce_image_size = True
max_dim = 2
rekey_fn = add_context_to_examples.ReKeyDataFn(
sequence_key, time_horizon,
reduce_image_size, max_dim)
output = rekey_fn.process(self._create_first_tf_example())
self.assert_expected_key(output[0][0])
self.assert_resized(output[0][1])
def test_sort_fn(self):
sequence_key = 'image/seq_id'
sorted_image_ids = False
max_num_elements_in_context_features = 10
sort_fn = add_context_to_examples.SortGroupedDataFn(
sequence_key, sorted_image_ids, max_num_elements_in_context_features)
output = sort_fn.process(
('dummy_key', [tf.train.Example.FromString(
self._create_second_tf_example()),
tf.train.Example.FromString(
self._create_first_tf_example())]))
self.assert_sorted(output[0][1])
def test_add_context_fn(self):
sequence_key = 'image/seq_id'
add_context_features = True
image_ids_to_keep = 'All'
context_fn = add_context_to_examples.GenerateContextFn(
sequence_key, add_context_features, image_ids_to_keep)
output = context_fn.process(
('dummy_key', [tf.train.Example.FromString(
self._create_first_tf_example()),
tf.train.Example.FromString(
self._create_second_tf_example())]))
self.assertEqual(len(output), 2)
self.assert_context(output)
def test_add_context_fn_output_sequence_example(self):
sequence_key = 'image/seq_id'
add_context_features = True
image_ids_to_keep = 'All'
context_fn = add_context_to_examples.GenerateContextFn(
sequence_key, add_context_features, image_ids_to_keep,
output_type='tf_sequence_example')
output = context_fn.process(
('01',
[tf.train.Example.FromString(self._create_first_tf_example()),
tf.train.Example.FromString(self._create_second_tf_example())]))
self.assertEqual(len(output), 1)
self.assert_expected_sequence_example(output)
def test_add_context_fn_output_sequence_example_cliplen(self):
sequence_key = 'image/seq_id'
add_context_features = True
image_ids_to_keep = 'All'
context_fn = add_context_to_examples.GenerateContextFn(
sequence_key, add_context_features, image_ids_to_keep,
output_type='tf_sequence_example', max_clip_length=1)
output = context_fn.process(
('01',
[tf.train.Example.FromString(self._create_first_tf_example()),
tf.train.Example.FromString(self._create_second_tf_example())]))
self.assertEqual(len(output), 2)
def test_beam_pipeline(self):
with InMemoryTFRecord(
[self._create_first_tf_example(),
self._create_second_tf_example()]) as input_tfrecord:
temp_dir = tempfile.mkdtemp(dir=os.environ.get('TEST_TMPDIR'))
output_tfrecord = os.path.join(temp_dir, 'output_tfrecord')
sequence_key = six.ensure_binary('image/seq_id')
max_num_elements = 10
num_shards = 1
pipeline_options = beam.options.pipeline_options.PipelineOptions(
runner='DirectRunner')
p = beam.Pipeline(options=pipeline_options)
add_context_to_examples.construct_pipeline(
p,
input_tfrecord,
output_tfrecord,
sequence_key,
max_num_elements_in_context_features=max_num_elements,
num_shards=num_shards)
p.run()
filenames = tf.io.gfile.glob(output_tfrecord + '-?????-of-?????')
actual_output = []
record_iterator = tf.data.TFRecordDataset(
tf.convert_to_tensor(filenames)).as_numpy_iterator()
for record in record_iterator:
actual_output.append(record)
self.assertEqual(len(actual_output), 2)
self.assert_expected_examples([tf.train.Example.FromString(
tf_example) for tf_example in actual_output])
def test_beam_pipeline_sequence_example(self):
with InMemoryTFRecord(
[self._create_first_tf_example(),
self._create_second_tf_example()]) as input_tfrecord:
temp_dir = tempfile.mkdtemp(dir=os.environ.get('TEST_TMPDIR'))
output_tfrecord = os.path.join(temp_dir, 'output_tfrecord')
sequence_key = six.ensure_binary('image/seq_id')
max_num_elements = 10
num_shards = 1
pipeline_options = beam.options.pipeline_options.PipelineOptions(
runner='DirectRunner')
p = beam.Pipeline(options=pipeline_options)
add_context_to_examples.construct_pipeline(
p,
input_tfrecord,
output_tfrecord,
sequence_key,
max_num_elements_in_context_features=max_num_elements,
num_shards=num_shards,
output_type='tf_sequence_example')
p.run()
filenames = tf.io.gfile.glob(output_tfrecord + '-?????-of-?????')
actual_output = []
record_iterator = tf.data.TFRecordDataset(
tf.convert_to_tensor(filenames)).as_numpy_iterator()
for record in record_iterator:
actual_output.append(record)
self.assertEqual(len(actual_output), 1)
self.assert_expected_sequence_example(
[tf.train.SequenceExample.FromString(
tf_example) for tf_example in actual_output])
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/dataset_tools/context_rcnn/add_context_to_examples_tf2_test.py | add_context_to_examples_tf2_test.py |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Beam pipeline to create COCO Camera Traps Object Detection TFRecords.
Please note that this tool creates sharded output files.
This tool assumes the input annotations are in the COCO Camera Traps json
format, specified here:
https://github.com/Microsoft/CameraTraps/blob/master/data_management/README.md
Example usage:
python create_cococameratraps_tfexample_main.py \
--alsologtostderr \
--output_tfrecord_prefix="/path/to/output/tfrecord/location/prefix" \
--image_directory="/path/to/image/folder/" \
--input_annotations_file="path/to/annotations.json"
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import hashlib
import io
import json
import os
import numpy as np
import PIL.Image
import tensorflow as tf
from object_detection.utils import dataset_util
try:
import apache_beam as beam # pylint:disable=g-import-not-at-top
except ModuleNotFoundError:
pass
class ParseImage(beam.DoFn):
"""A DoFn that parses a COCO-CameraTraps json and emits TFRecords."""
def __init__(self, image_directory, images, annotations, categories,
keep_bboxes):
"""Initialization function.
Args:
image_directory: Path to image directory
images: list of COCO Camera Traps style image dictionaries
annotations: list of COCO Camera Traps style annotation dictionaries
categories: list of COCO Camera Traps style category dictionaries
keep_bboxes: Whether to keep any bounding boxes that exist in the
annotations
"""
self._image_directory = image_directory
self._image_dict = {im['id']: im for im in images}
self._annotation_dict = {im['id']: [] for im in images}
self._category_dict = {int(cat['id']): cat for cat in categories}
for ann in annotations:
self._annotation_dict[ann['image_id']].append(ann)
self._images = images
self._keep_bboxes = keep_bboxes
self._num_examples_processed = beam.metrics.Metrics.counter(
'cococameratraps_data_generation', 'num_tf_examples_processed')
def process(self, image_id):
"""Builds a tf.Example given an image id.
Args:
image_id: the image id of the associated image
Returns:
List of tf.Examples.
"""
image = self._image_dict[image_id]
annotations = self._annotation_dict[image_id]
image_height = image['height']
image_width = image['width']
filename = image['file_name']
image_id = image['id']
image_location_id = image['location']
image_datetime = str(image['date_captured'])
image_sequence_id = str(image['seq_id'])
image_sequence_num_frames = int(image['seq_num_frames'])
image_sequence_frame_num = int(image['frame_num'])
full_path = os.path.join(self._image_directory, filename)
try:
# Ensure the image exists and is not corrupted
with tf.io.gfile.GFile(full_path, 'rb') as fid:
encoded_jpg = fid.read()
encoded_jpg_io = io.BytesIO(encoded_jpg)
image = PIL.Image.open(encoded_jpg_io)
image = tf.io.decode_jpeg(encoded_jpg, channels=3)
except Exception: # pylint: disable=broad-except
# The image file is missing or corrupt
return []
key = hashlib.sha256(encoded_jpg).hexdigest()
feature_dict = {
'image/height':
dataset_util.int64_feature(image_height),
'image/width':
dataset_util.int64_feature(image_width),
'image/filename':
dataset_util.bytes_feature(filename.encode('utf8')),
'image/source_id':
dataset_util.bytes_feature(str(image_id).encode('utf8')),
'image/key/sha256':
dataset_util.bytes_feature(key.encode('utf8')),
'image/encoded':
dataset_util.bytes_feature(encoded_jpg),
'image/format':
dataset_util.bytes_feature('jpeg'.encode('utf8')),
'image/location':
dataset_util.bytes_feature(str(image_location_id).encode('utf8')),
'image/seq_num_frames':
dataset_util.int64_feature(image_sequence_num_frames),
'image/seq_frame_num':
dataset_util.int64_feature(image_sequence_frame_num),
'image/seq_id':
dataset_util.bytes_feature(image_sequence_id.encode('utf8')),
'image/date_captured':
dataset_util.bytes_feature(image_datetime.encode('utf8'))
}
num_annotations_skipped = 0
if annotations:
xmin = []
xmax = []
ymin = []
ymax = []
category_names = []
category_ids = []
area = []
for object_annotations in annotations:
if 'bbox' in object_annotations and self._keep_bboxes:
(x, y, width, height) = tuple(object_annotations['bbox'])
if width <= 0 or height <= 0:
num_annotations_skipped += 1
continue
if x + width > image_width or y + height > image_height:
num_annotations_skipped += 1
continue
xmin.append(float(x) / image_width)
xmax.append(float(x + width) / image_width)
ymin.append(float(y) / image_height)
ymax.append(float(y + height) / image_height)
if 'area' in object_annotations:
area.append(object_annotations['area'])
else:
            # Approximate the area as width * height / 2.
area.append(width*height/2.0)
category_id = int(object_annotations['category_id'])
category_ids.append(category_id)
category_names.append(
self._category_dict[category_id]['name'].encode('utf8'))
feature_dict.update({
'image/object/bbox/xmin':
dataset_util.float_list_feature(xmin),
'image/object/bbox/xmax':
dataset_util.float_list_feature(xmax),
'image/object/bbox/ymin':
dataset_util.float_list_feature(ymin),
'image/object/bbox/ymax':
dataset_util.float_list_feature(ymax),
'image/object/class/text':
dataset_util.bytes_list_feature(category_names),
'image/object/class/label':
dataset_util.int64_list_feature(category_ids),
'image/object/area':
dataset_util.float_list_feature(area),
})
# For classification, add the first category to image/class/label and
# image/class/text
if not category_ids:
feature_dict.update({
'image/class/label':
dataset_util.int64_list_feature([0]),
'image/class/text':
dataset_util.bytes_list_feature(['empty'.encode('utf8')]),
})
else:
feature_dict.update({
'image/class/label':
dataset_util.int64_list_feature([category_ids[0]]),
'image/class/text':
dataset_util.bytes_list_feature([category_names[0]]),
})
else:
# Add empty class if there are no annotations
feature_dict.update({
'image/class/label':
dataset_util.int64_list_feature([0]),
'image/class/text':
dataset_util.bytes_list_feature(['empty'.encode('utf8')]),
})
example = tf.train.Example(features=tf.train.Features(feature=feature_dict))
self._num_examples_processed.inc(1)
return [(example)]
def load_json_data(data_file):
with tf.io.gfile.GFile(data_file, 'r') as fid:
data_dict = json.load(fid)
return data_dict
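# A minimal sketch of the json structure consumed by ParseImage above (field
# names follow that DoFn; values are illustrative placeholders; 'bbox' is
# optional and given as [x, y, width, height] in absolute pixels):
#   {
#     "categories": [{"id": 0, "name": "empty"}, {"id": 1, "name": "animal"}],
#     "images": [{"id": "im_0", "file_name": "im_0.jpg", "height": 360,
#                 "width": 480, "location": "loc_0",
#                 "date_captured": "2020-01-01 01:00:00", "seq_id": "seq_1",
#                 "seq_num_frames": 1, "frame_num": 0}],
#     "annotations": [{"id": "ann_0", "image_id": "im_0", "category_id": 1,
#                      "bbox": [0.0, 36.0, 240.0, 180.0]}]
#   }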
def create_pipeline(pipeline,
image_directory,
input_annotations_file,
output_tfrecord_prefix=None,
num_images_per_shard=200,
keep_bboxes=True):
"""Creates a beam pipeline for producing a COCO-CameraTraps Image dataset.
Args:
pipeline: Initialized beam pipeline.
image_directory: Path to image directory
input_annotations_file: Path to a coco-cameratraps annotation file
output_tfrecord_prefix: Absolute path for tfrecord outputs. Final files will
be named {output_tfrecord_prefix}@N.
num_images_per_shard: The number of images to store in each shard
keep_bboxes: Whether to keep any bounding boxes that exist in the json file
"""
data = load_json_data(input_annotations_file)
num_shards = int(np.ceil(float(len(data['images']))/num_images_per_shard))
image_examples = (
pipeline | ('CreateCollections') >> beam.Create(
[im['id'] for im in data['images']])
| ('ParseImage') >> beam.ParDo(ParseImage(
image_directory, data['images'], data['annotations'],
data['categories'], keep_bboxes=keep_bboxes)))
_ = (image_examples
| ('Reshuffle') >> beam.Reshuffle()
| ('WriteTfImageExample') >> beam.io.tfrecordio.WriteToTFRecord(
output_tfrecord_prefix,
num_shards=num_shards,
coder=beam.coders.ProtoCoder(tf.train.Example)))
def parse_args(argv):
"""Command-line argument parser.
Args:
argv: command line arguments
Returns:
beam_args: Arguments for the beam pipeline.
pipeline_args: Arguments for the pipeline options, such as runner type.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'--image_directory',
dest='image_directory',
required=True,
help='Path to the directory where the images are stored.')
parser.add_argument(
'--output_tfrecord_prefix',
dest='output_tfrecord_prefix',
required=True,
      help='Path and prefix to store TFRecords containing images in '
      'tf.Example format.')
parser.add_argument(
'--input_annotations_file',
dest='input_annotations_file',
required=True,
help='Path to Coco-CameraTraps style annotations file.')
parser.add_argument(
'--num_images_per_shard',
dest='num_images_per_shard',
default=200,
      help='The number of images to be stored in each output shard.')
beam_args, pipeline_args = parser.parse_known_args(argv)
return beam_args, pipeline_args
def main(argv=None, save_main_session=True):
"""Runs the Beam pipeline that performs inference.
Args:
argv: Command line arguments.
save_main_session: Whether to save the main session.
"""
args, pipeline_args = parse_args(argv)
pipeline_options = beam.options.pipeline_options.PipelineOptions(
pipeline_args)
pipeline_options.view_as(
beam.options.pipeline_options.SetupOptions).save_main_session = (
save_main_session)
dirname = os.path.dirname(args.output_tfrecord_prefix)
tf.io.gfile.makedirs(dirname)
p = beam.Pipeline(options=pipeline_options)
create_pipeline(
pipeline=p,
image_directory=args.image_directory,
input_annotations_file=args.input_annotations_file,
output_tfrecord_prefix=args.output_tfrecord_prefix,
num_images_per_shard=args.num_images_per_shard)
p.run()
if __name__ == '__main__':
main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/dataset_tools/context_rcnn/create_cococameratraps_tfexample_main.py | create_cococameratraps_tfexample_main.py |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for create_cococameratraps_tfexample_main."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import json
import os
import tempfile
import unittest
import numpy as np
from PIL import Image
import tensorflow as tf
from object_detection.utils import tf_version
if tf_version.is_tf2():
from object_detection.dataset_tools.context_rcnn import create_cococameratraps_tfexample_main # pylint:disable=g-import-not-at-top
try:
import apache_beam as beam # pylint:disable=g-import-not-at-top
except ModuleNotFoundError:
pass
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class CreateCOCOCameraTrapsTfexampleTest(tf.test.TestCase):
IMAGE_HEIGHT = 360
IMAGE_WIDTH = 480
def _write_random_images_to_directory(self, directory, num_frames):
for frame_num in range(num_frames):
img = np.random.randint(0, high=256,
size=(self.IMAGE_HEIGHT, self.IMAGE_WIDTH, 3),
dtype=np.uint8)
pil_image = Image.fromarray(img)
fname = 'im_' + str(frame_num) + '.jpg'
pil_image.save(os.path.join(directory, fname), 'JPEG')
def _create_json_file(self, directory, num_frames, keep_bboxes=False):
json_dict = {'images': [], 'annotations': []}
json_dict['categories'] = [{'id': 0, 'name': 'empty'},
{'id': 1, 'name': 'animal'}]
for idx in range(num_frames):
im = {'id': 'im_' + str(idx),
'file_name': 'im_' + str(idx) + '.jpg',
'height': self.IMAGE_HEIGHT,
'width': self.IMAGE_WIDTH,
'seq_id': 'seq_1',
'seq_num_frames': num_frames,
'frame_num': idx,
'location': 'loc_' + str(idx),
'date_captured': str(datetime.datetime.now())
}
json_dict['images'].append(im)
ann = {'id': 'ann' + str(idx),
'image_id': 'im_' + str(idx),
'category_id': 1,
}
if keep_bboxes:
ann['bbox'] = [0.0 * self.IMAGE_WIDTH,
0.1 * self.IMAGE_HEIGHT,
0.5 * self.IMAGE_WIDTH,
0.5 * self.IMAGE_HEIGHT]
json_dict['annotations'].append(ann)
json_path = os.path.join(directory, 'test_file.json')
with tf.io.gfile.GFile(json_path, 'w') as f:
json.dump(json_dict, f)
return json_path
def assert_expected_example_bbox(self, example):
self.assertAllClose(
example.features.feature['image/object/bbox/ymin'].float_list.value,
[0.1])
self.assertAllClose(
example.features.feature['image/object/bbox/xmin'].float_list.value,
[0.0])
self.assertAllClose(
example.features.feature['image/object/bbox/ymax'].float_list.value,
[0.6])
self.assertAllClose(
example.features.feature['image/object/bbox/xmax'].float_list.value,
[0.5])
self.assertAllClose(
example.features.feature['image/object/class/label']
.int64_list.value, [1])
self.assertAllEqual(
example.features.feature['image/object/class/text']
.bytes_list.value, [b'animal'])
self.assertAllClose(
example.features.feature['image/class/label']
.int64_list.value, [1])
self.assertAllEqual(
example.features.feature['image/class/text']
.bytes_list.value, [b'animal'])
# Check other essential attributes.
self.assertAllEqual(
example.features.feature['image/height'].int64_list.value,
[self.IMAGE_HEIGHT])
self.assertAllEqual(
example.features.feature['image/width'].int64_list.value,
[self.IMAGE_WIDTH])
self.assertAllEqual(
example.features.feature['image/source_id'].bytes_list.value,
[b'im_0'])
self.assertTrue(
example.features.feature['image/encoded'].bytes_list.value)
def assert_expected_example(self, example):
self.assertAllClose(
example.features.feature['image/object/bbox/ymin'].float_list.value,
[])
self.assertAllClose(
example.features.feature['image/object/bbox/xmin'].float_list.value,
[])
self.assertAllClose(
example.features.feature['image/object/bbox/ymax'].float_list.value,
[])
self.assertAllClose(
example.features.feature['image/object/bbox/xmax'].float_list.value,
[])
self.assertAllClose(
example.features.feature['image/object/class/label']
.int64_list.value, [1])
self.assertAllEqual(
example.features.feature['image/object/class/text']
.bytes_list.value, [b'animal'])
self.assertAllClose(
example.features.feature['image/class/label']
.int64_list.value, [1])
self.assertAllEqual(
example.features.feature['image/class/text']
.bytes_list.value, [b'animal'])
# Check other essential attributes.
self.assertAllEqual(
example.features.feature['image/height'].int64_list.value,
[self.IMAGE_HEIGHT])
self.assertAllEqual(
example.features.feature['image/width'].int64_list.value,
[self.IMAGE_WIDTH])
self.assertAllEqual(
example.features.feature['image/source_id'].bytes_list.value,
[b'im_0'])
self.assertTrue(
example.features.feature['image/encoded'].bytes_list.value)
def test_beam_pipeline(self):
num_frames = 1
temp_dir = tempfile.mkdtemp(dir=os.environ.get('TEST_TMPDIR'))
json_path = self._create_json_file(temp_dir, num_frames)
output_tfrecord = temp_dir+'/output'
self._write_random_images_to_directory(temp_dir, num_frames)
pipeline_options = beam.options.pipeline_options.PipelineOptions(
runner='DirectRunner')
p = beam.Pipeline(options=pipeline_options)
create_cococameratraps_tfexample_main.create_pipeline(
p, temp_dir, json_path,
output_tfrecord_prefix=output_tfrecord)
p.run()
filenames = tf.io.gfile.glob(output_tfrecord + '-?????-of-?????')
actual_output = []
record_iterator = tf.data.TFRecordDataset(
tf.convert_to_tensor(filenames)).as_numpy_iterator()
for record in record_iterator:
actual_output.append(record)
self.assertEqual(len(actual_output), num_frames)
self.assert_expected_example(tf.train.Example.FromString(
actual_output[0]))
def test_beam_pipeline_bbox(self):
num_frames = 1
temp_dir = tempfile.mkdtemp(dir=os.environ.get('TEST_TMPDIR'))
json_path = self._create_json_file(temp_dir, num_frames, keep_bboxes=True)
output_tfrecord = temp_dir+'/output'
self._write_random_images_to_directory(temp_dir, num_frames)
pipeline_options = beam.options.pipeline_options.PipelineOptions(
runner='DirectRunner')
p = beam.Pipeline(options=pipeline_options)
create_cococameratraps_tfexample_main.create_pipeline(
p, temp_dir, json_path,
output_tfrecord_prefix=output_tfrecord,
keep_bboxes=True)
p.run()
filenames = tf.io.gfile.glob(output_tfrecord+'-?????-of-?????')
actual_output = []
record_iterator = tf.data.TFRecordDataset(
tf.convert_to_tensor(filenames)).as_numpy_iterator()
for record in record_iterator:
actual_output.append(record)
self.assertEqual(len(actual_output), num_frames)
self.assert_expected_example_bbox(tf.train.Example.FromString(
actual_output[0]))
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/dataset_tools/context_rcnn/create_cococameratraps_tfexample_tf2_test.py | create_cococameratraps_tfexample_tf2_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Training executable for detection models.
This executable is used to train DetectionModels. There are two ways of
configuring the training job:
1) A single pipeline_pb2.TrainEvalPipelineConfig configuration file
can be specified by --pipeline_config_path.
Example usage:
./train \
--logtostderr \
--train_dir=path/to/train_dir \
--pipeline_config_path=pipeline_config.pbtxt
2) Three configuration files can be provided: a model_pb2.DetectionModel
configuration file to define what type of DetectionModel is being trained, an
input_reader_pb2.InputReader file to specify what training data will be used and
a train_pb2.TrainConfig file to configure training parameters.
Example usage:
./train \
--logtostderr \
--train_dir=path/to/train_dir \
--model_config_path=model_config.pbtxt \
--train_config_path=train_config.pbtxt \
--input_config_path=train_input_config.pbtxt
"""
import functools
import json
import os
import tensorflow.compat.v1 as tf
from tensorflow.python.util.deprecation import deprecated
from object_detection.builders import dataset_builder
from object_detection.builders import graph_rewriter_builder
from object_detection.builders import model_builder
from object_detection.legacy import trainer
from object_detection.utils import config_util
tf.logging.set_verbosity(tf.logging.INFO)
flags = tf.app.flags
flags.DEFINE_string('master', '', 'Name of the TensorFlow master to use.')
flags.DEFINE_integer('task', 0, 'task id')
flags.DEFINE_integer('num_clones', 1, 'Number of clones to deploy per worker.')
flags.DEFINE_boolean('clone_on_cpu', False,
'Force clones to be deployed on CPU. Note that even if '
'set to False (allowing ops to run on gpu), some ops may '
'still be run on the CPU if they have no GPU kernel.')
flags.DEFINE_integer('worker_replicas', 1, 'Number of worker+trainer '
'replicas.')
flags.DEFINE_integer('ps_tasks', 0,
'Number of parameter server tasks. If None, does not use '
'a parameter server.')
flags.DEFINE_string('train_dir', '',
'Directory to save the checkpoints and training summaries.')
flags.DEFINE_string('pipeline_config_path', '',
'Path to a pipeline_pb2.TrainEvalPipelineConfig config '
'file. If provided, other configs are ignored')
flags.DEFINE_string('train_config_path', '',
'Path to a train_pb2.TrainConfig config file.')
flags.DEFINE_string('input_config_path', '',
'Path to an input_reader_pb2.InputReader config file.')
flags.DEFINE_string('model_config_path', '',
'Path to a model_pb2.DetectionModel config file.')
FLAGS = flags.FLAGS
@deprecated(None, 'Use object_detection/model_main.py.')
def main(_):
assert FLAGS.train_dir, '`train_dir` is missing.'
if FLAGS.task == 0: tf.gfile.MakeDirs(FLAGS.train_dir)
if FLAGS.pipeline_config_path:
configs = config_util.get_configs_from_pipeline_file(
FLAGS.pipeline_config_path)
if FLAGS.task == 0:
tf.gfile.Copy(FLAGS.pipeline_config_path,
os.path.join(FLAGS.train_dir, 'pipeline.config'),
overwrite=True)
else:
configs = config_util.get_configs_from_multiple_files(
model_config_path=FLAGS.model_config_path,
train_config_path=FLAGS.train_config_path,
train_input_config_path=FLAGS.input_config_path)
if FLAGS.task == 0:
for name, config in [('model.config', FLAGS.model_config_path),
('train.config', FLAGS.train_config_path),
('input.config', FLAGS.input_config_path)]:
tf.gfile.Copy(config, os.path.join(FLAGS.train_dir, name),
overwrite=True)
model_config = configs['model']
train_config = configs['train_config']
input_config = configs['train_input_config']
model_fn = functools.partial(
model_builder.build,
model_config=model_config,
is_training=True)
def get_next(config):
return dataset_builder.make_initializable_iterator(
dataset_builder.build(config)).get_next()
create_input_dict_fn = functools.partial(get_next, input_config)
env = json.loads(os.environ.get('TF_CONFIG', '{}'))
cluster_data = env.get('cluster', None)
cluster = tf.train.ClusterSpec(cluster_data) if cluster_data else None
task_data = env.get('task', None) or {'type': 'master', 'index': 0}
task_info = type('TaskSpec', (object,), task_data)
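  # The cluster/task specs above come from the TF_CONFIG environment variable.
  # A minimal sketch of its layout (host:port values are placeholders):
  #   TF_CONFIG='{"cluster": {"master": ["host0:2222"],
  #                           "worker": ["host1:2222"],
  #                           "ps":     ["host2:2222"]},
  #               "task": {"type": "worker", "index": 0}}'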
# Parameters for a single worker.
ps_tasks = 0
worker_replicas = 1
worker_job_name = 'lonely_worker'
task = 0
is_chief = True
master = ''
if cluster_data and 'worker' in cluster_data:
    # The total number of worker replicas includes the "worker"s and the
    # "master".
worker_replicas = len(cluster_data['worker']) + 1
if cluster_data and 'ps' in cluster_data:
ps_tasks = len(cluster_data['ps'])
if worker_replicas > 1 and ps_tasks < 1:
raise ValueError('At least 1 ps task is needed for distributed training.')
if worker_replicas >= 1 and ps_tasks > 0:
# Set up distributed training.
server = tf.train.Server(tf.train.ClusterSpec(cluster), protocol='grpc',
job_name=task_info.type,
task_index=task_info.index)
if task_info.type == 'ps':
server.join()
return
worker_job_name = '%s/task:%d' % (task_info.type, task_info.index)
task = task_info.index
is_chief = (task_info.type == 'master')
master = server.target
graph_rewriter_fn = None
if 'graph_rewriter_config' in configs:
graph_rewriter_fn = graph_rewriter_builder.build(
configs['graph_rewriter_config'], is_training=True)
trainer.train(
create_input_dict_fn,
model_fn,
train_config,
master,
task,
FLAGS.num_clones,
worker_replicas,
FLAGS.clone_on_cpu,
ps_tasks,
worker_job_name,
is_chief,
FLAGS.train_dir,
graph_hook_fn=graph_rewriter_fn)
if __name__ == '__main__':
tf.app.run()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/legacy/train.py | train.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Detection model evaluator.
This file provides a generic evaluation method that can be used to evaluate a
DetectionModel.
"""
import logging
import tensorflow.compat.v1 as tf
from object_detection import eval_util
from object_detection.core import prefetcher
from object_detection.core import standard_fields as fields
from object_detection.metrics import coco_evaluation
from object_detection.utils import object_detection_evaluation
# A dictionary of metric names to classes that implement the metric. The
# classes in the dictionary must implement the
# utils.object_detection_evaluation.DetectionEvaluator interface.
EVAL_METRICS_CLASS_DICT = {
'pascal_voc_detection_metrics':
object_detection_evaluation.PascalDetectionEvaluator,
'weighted_pascal_voc_detection_metrics':
object_detection_evaluation.WeightedPascalDetectionEvaluator,
'pascal_voc_instance_segmentation_metrics':
object_detection_evaluation.PascalInstanceSegmentationEvaluator,
'weighted_pascal_voc_instance_segmentation_metrics':
object_detection_evaluation.WeightedPascalInstanceSegmentationEvaluator,
'oid_V2_detection_metrics':
object_detection_evaluation.OpenImagesDetectionEvaluator,
# DEPRECATED: please use oid_V2_detection_metrics instead
'open_images_V2_detection_metrics':
object_detection_evaluation.OpenImagesDetectionEvaluator,
'coco_detection_metrics':
coco_evaluation.CocoDetectionEvaluator,
'coco_mask_metrics':
coco_evaluation.CocoMaskEvaluator,
'oid_challenge_detection_metrics':
object_detection_evaluation.OpenImagesDetectionChallengeEvaluator,
# DEPRECATED: please use oid_challenge_detection_metrics instead
'oid_challenge_object_detection_metrics':
object_detection_evaluation.OpenImagesDetectionChallengeEvaluator,
'oid_challenge_segmentation_metrics':
object_detection_evaluation
.OpenImagesInstanceSegmentationChallengeEvaluator,
}
EVAL_DEFAULT_METRIC = 'pascal_voc_detection_metrics'
def _extract_predictions_and_losses(model,
create_input_dict_fn,
ignore_groundtruth=False):
"""Constructs tensorflow detection graph and returns output tensors.
Args:
model: model to perform predictions with.
create_input_dict_fn: function to create input tensor dictionaries.
ignore_groundtruth: whether groundtruth should be ignored.
Returns:
prediction_groundtruth_dict: A dictionary with postprocessed tensors (keyed
by standard_fields.DetectionResultsFields) and optional groundtruth
tensors (keyed by standard_fields.InputDataFields).
losses_dict: A dictionary containing detection losses. This is empty when
ignore_groundtruth is true.
"""
input_dict = create_input_dict_fn()
prefetch_queue = prefetcher.prefetch(input_dict, capacity=500)
input_dict = prefetch_queue.dequeue()
original_image = tf.expand_dims(input_dict[fields.InputDataFields.image], 0)
preprocessed_image, true_image_shapes = model.preprocess(
tf.cast(original_image, dtype=tf.float32))
prediction_dict = model.predict(preprocessed_image, true_image_shapes)
detections = model.postprocess(prediction_dict, true_image_shapes)
groundtruth = None
losses_dict = {}
if not ignore_groundtruth:
groundtruth = {
fields.InputDataFields.groundtruth_boxes:
input_dict[fields.InputDataFields.groundtruth_boxes],
fields.InputDataFields.groundtruth_classes:
input_dict[fields.InputDataFields.groundtruth_classes],
fields.InputDataFields.groundtruth_area:
input_dict[fields.InputDataFields.groundtruth_area],
fields.InputDataFields.groundtruth_is_crowd:
input_dict[fields.InputDataFields.groundtruth_is_crowd],
fields.InputDataFields.groundtruth_difficult:
input_dict[fields.InputDataFields.groundtruth_difficult]
}
if fields.InputDataFields.groundtruth_group_of in input_dict:
groundtruth[fields.InputDataFields.groundtruth_group_of] = (
input_dict[fields.InputDataFields.groundtruth_group_of])
groundtruth_masks_list = None
if fields.DetectionResultFields.detection_masks in detections:
groundtruth[fields.InputDataFields.groundtruth_instance_masks] = (
input_dict[fields.InputDataFields.groundtruth_instance_masks])
groundtruth_masks_list = [
input_dict[fields.InputDataFields.groundtruth_instance_masks]]
groundtruth_keypoints_list = None
if fields.DetectionResultFields.detection_keypoints in detections:
groundtruth[fields.InputDataFields.groundtruth_keypoints] = (
input_dict[fields.InputDataFields.groundtruth_keypoints])
groundtruth_keypoints_list = [
input_dict[fields.InputDataFields.groundtruth_keypoints]]
label_id_offset = 1
model.provide_groundtruth(
[input_dict[fields.InputDataFields.groundtruth_boxes]],
[tf.one_hot(input_dict[fields.InputDataFields.groundtruth_classes]
- label_id_offset, depth=model.num_classes)],
groundtruth_masks_list, groundtruth_keypoints_list)
losses_dict.update(model.loss(prediction_dict, true_image_shapes))
result_dict = eval_util.result_dict_for_single_example(
original_image,
input_dict[fields.InputDataFields.source_id],
detections,
groundtruth,
class_agnostic=(
fields.DetectionResultFields.detection_classes not in detections),
scale_to_absolute=True)
return result_dict, losses_dict
def get_evaluators(eval_config, categories):
"""Returns the evaluator class according to eval_config, valid for categories.
Args:
eval_config: evaluation configurations.
categories: a list of categories to evaluate.
Returns:
    A list of instances of DetectionEvaluator.
Raises:
ValueError: if metric is not in the metric class dictionary.
"""
eval_metric_fn_keys = eval_config.metrics_set
if not eval_metric_fn_keys:
eval_metric_fn_keys = [EVAL_DEFAULT_METRIC]
evaluators_list = []
for eval_metric_fn_key in eval_metric_fn_keys:
if eval_metric_fn_key not in EVAL_METRICS_CLASS_DICT:
raise ValueError('Metric not found: {}'.format(eval_metric_fn_key))
if eval_metric_fn_key == 'oid_challenge_object_detection_metrics':
logging.warning(
'oid_challenge_object_detection_metrics is deprecated; '
'use oid_challenge_detection_metrics instead'
)
    if eval_metric_fn_key == 'open_images_V2_detection_metrics':
logging.warning(
'open_images_V2_detection_metrics is deprecated; '
'use oid_V2_detection_metrics instead'
)
evaluators_list.append(
EVAL_METRICS_CLASS_DICT[eval_metric_fn_key](categories=categories))
return evaluators_list
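# A minimal sketch of how a metric set is selected in the eval_config section
# of a pipeline config (pbtxt form; the value must be a key of
# EVAL_METRICS_CLASS_DICT above, and the exact field layout is an assumption):
#   eval_config {
#     metrics_set: 'coco_detection_metrics'
#     num_examples: 8000
#   }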
def evaluate(create_input_dict_fn, create_model_fn, eval_config, categories,
checkpoint_dir, eval_dir, graph_hook_fn=None, evaluator_list=None):
"""Evaluation function for detection models.
Args:
create_input_dict_fn: a function to create a tensor input dictionary.
create_model_fn: a function that creates a DetectionModel.
    eval_config: an eval_pb2.EvalConfig protobuf.
categories: a list of category dictionaries. Each dict in the list should
have an integer 'id' field and string 'name' field.
checkpoint_dir: directory to load the checkpoints to evaluate from.
eval_dir: directory to write evaluation metrics summary to.
graph_hook_fn: Optional function that is called after the training graph is
completely built. This is helpful to perform additional changes to the
training graph such as optimizing batchnorm. The function should modify
the default graph.
evaluator_list: Optional list of instances of DetectionEvaluator. If not
given, this list of metrics is created according to the eval_config.
Returns:
metrics: A dictionary containing metric names and values from the latest
run.
"""
model = create_model_fn()
if eval_config.ignore_groundtruth and not eval_config.export_path:
logging.fatal('If ignore_groundtruth=True then an export_path is '
'required. Aborting!!!')
tensor_dict, losses_dict = _extract_predictions_and_losses(
model=model,
create_input_dict_fn=create_input_dict_fn,
ignore_groundtruth=eval_config.ignore_groundtruth)
def _process_batch(tensor_dict, sess, batch_index, counters,
losses_dict=None):
"""Evaluates tensors in tensor_dict, losses_dict and visualizes examples.
This function calls sess.run on tensor_dict, evaluating the original_image
tensor only on the first K examples and visualizing detections overlaid
on this original_image.
Args:
tensor_dict: a dictionary of tensors
sess: tensorflow session
batch_index: the index of the batch amongst all batches in the run.
counters: a dictionary holding 'success' and 'skipped' fields which can
be updated to keep track of number of successful and failed runs,
respectively. If these fields are not updated, then the success/skipped
counter values shown at the end of evaluation will be incorrect.
      losses_dict: Optional dictionary of scalar loss tensors.
Returns:
result_dict: a dictionary of numpy arrays
result_losses_dict: a dictionary of scalar losses. This is empty if input
losses_dict is None.
"""
try:
if not losses_dict:
losses_dict = {}
result_dict, result_losses_dict = sess.run([tensor_dict, losses_dict])
counters['success'] += 1
except tf.errors.InvalidArgumentError:
logging.info('Skipping image')
counters['skipped'] += 1
return {}, {}
global_step = tf.train.global_step(sess, tf.train.get_global_step())
if batch_index < eval_config.num_visualizations:
tag = 'image-{}'.format(batch_index)
eval_util.visualize_detection_results(
result_dict,
tag,
global_step,
categories=categories,
summary_dir=eval_dir,
export_dir=eval_config.visualization_export_dir,
show_groundtruth=eval_config.visualize_groundtruth_boxes,
groundtruth_box_visualization_color=eval_config.
groundtruth_box_visualization_color,
min_score_thresh=eval_config.min_score_threshold,
max_num_predictions=eval_config.max_num_boxes_to_visualize,
skip_scores=eval_config.skip_scores,
skip_labels=eval_config.skip_labels,
keep_image_id_for_visualization_export=eval_config.
keep_image_id_for_visualization_export)
return result_dict, result_losses_dict
if graph_hook_fn: graph_hook_fn()
variables_to_restore = tf.global_variables()
global_step = tf.train.get_or_create_global_step()
variables_to_restore.append(global_step)
if eval_config.use_moving_averages:
variable_averages = tf.train.ExponentialMovingAverage(0.0)
variables_to_restore = variable_averages.variables_to_restore()
saver = tf.train.Saver(variables_to_restore)
def _restore_latest_checkpoint(sess):
latest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir)
saver.restore(sess, latest_checkpoint)
if not evaluator_list:
evaluator_list = get_evaluators(eval_config, categories)
metrics = eval_util.repeated_checkpoint_run(
tensor_dict=tensor_dict,
summary_dir=eval_dir,
evaluators=evaluator_list,
batch_processor=_process_batch,
checkpoint_dirs=[checkpoint_dir],
variables_to_restore=None,
restore_fn=_restore_latest_checkpoint,
num_batches=eval_config.num_examples,
eval_interval_secs=eval_config.eval_interval_secs,
max_number_of_evaluations=(1 if eval_config.ignore_groundtruth else
eval_config.max_evals
if eval_config.max_evals else None),
master=eval_config.eval_master,
save_graph=eval_config.save_graph,
save_graph_dir=(eval_dir if eval_config.save_graph else ''),
losses_dict=losses_dict,
eval_export_path=eval_config.export_path)
return metrics
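# Minimal calling sketch for evaluate(); names such as `model_config` and the
# directory paths are placeholders. The eval.py binary in this package shows
# the full wiring from configuration files to this call.
#
#   model_fn = functools.partial(
#       model_builder.build, model_config=model_config, is_training=False)
#   metrics = evaluate(create_input_dict_fn, model_fn, eval_config, categories,
#                      checkpoint_dir='/path/to/train_dir',
#                      eval_dir='/path/to/eval_dir')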
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/legacy/evaluator.py | evaluator.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Evaluation executable for detection models.
This executable is used to evaluate DetectionModels. There are two ways of
configuring the eval job.
1) A single pipeline_pb2.TrainEvalPipelineConfig file may be specified.
In this mode, the --eval_training_data flag may be given to force the pipeline
to evaluate on training data instead.
Example usage:
./eval \
--logtostderr \
--checkpoint_dir=path/to/checkpoint_dir \
--eval_dir=path/to/eval_dir \
--pipeline_config_path=pipeline_config.pbtxt
2) Three configuration files may be provided: a model_pb2.DetectionModel
configuration file to define what type of DetectionModel is being evaluated, an
input_reader_pb2.InputReader file to specify what data the model is evaluating
and an eval_pb2.EvalConfig file to configure evaluation parameters.
Example usage:
./eval \
--logtostderr \
--checkpoint_dir=path/to/checkpoint_dir \
--eval_dir=path/to/eval_dir \
--eval_config_path=eval_config.pbtxt \
--model_config_path=model_config.pbtxt \
--input_config_path=eval_input_config.pbtxt
"""
import functools
import os
import tensorflow.compat.v1 as tf
from tensorflow.python.util.deprecation import deprecated
from object_detection.builders import dataset_builder
from object_detection.builders import graph_rewriter_builder
from object_detection.builders import model_builder
from object_detection.legacy import evaluator
from object_detection.utils import config_util
from object_detection.utils import label_map_util
tf.logging.set_verbosity(tf.logging.INFO)
flags = tf.app.flags
flags.DEFINE_boolean('eval_training_data', False,
'If training data should be evaluated for this job.')
flags.DEFINE_string(
'checkpoint_dir', '',
'Directory containing checkpoints to evaluate, typically '
'set to `train_dir` used in the training job.')
flags.DEFINE_string('eval_dir', '', 'Directory to write eval summaries to.')
flags.DEFINE_string(
'pipeline_config_path', '',
'Path to a pipeline_pb2.TrainEvalPipelineConfig config '
'file. If provided, other configs are ignored')
flags.DEFINE_string('eval_config_path', '',
'Path to an eval_pb2.EvalConfig config file.')
flags.DEFINE_string('input_config_path', '',
'Path to an input_reader_pb2.InputReader config file.')
flags.DEFINE_string('model_config_path', '',
'Path to a model_pb2.DetectionModel config file.')
flags.DEFINE_boolean(
'run_once', False, 'Option to only run a single pass of '
'evaluation. Overrides the `max_evals` parameter in the '
'provided config.')
FLAGS = flags.FLAGS
@deprecated(None, 'Use object_detection/model_main.py.')
def main(unused_argv):
assert FLAGS.checkpoint_dir, '`checkpoint_dir` is missing.'
assert FLAGS.eval_dir, '`eval_dir` is missing.'
tf.gfile.MakeDirs(FLAGS.eval_dir)
if FLAGS.pipeline_config_path:
configs = config_util.get_configs_from_pipeline_file(
FLAGS.pipeline_config_path)
tf.gfile.Copy(
FLAGS.pipeline_config_path,
os.path.join(FLAGS.eval_dir, 'pipeline.config'),
overwrite=True)
else:
configs = config_util.get_configs_from_multiple_files(
model_config_path=FLAGS.model_config_path,
eval_config_path=FLAGS.eval_config_path,
eval_input_config_path=FLAGS.input_config_path)
for name, config in [('model.config', FLAGS.model_config_path),
('eval.config', FLAGS.eval_config_path),
('input.config', FLAGS.input_config_path)]:
tf.gfile.Copy(config, os.path.join(FLAGS.eval_dir, name), overwrite=True)
model_config = configs['model']
eval_config = configs['eval_config']
input_config = configs['eval_input_config']
if FLAGS.eval_training_data:
input_config = configs['train_input_config']
model_fn = functools.partial(
model_builder.build, model_config=model_config, is_training=False)
def get_next(config):
return dataset_builder.make_initializable_iterator(
dataset_builder.build(config)).get_next()
create_input_dict_fn = functools.partial(get_next, input_config)
categories = label_map_util.create_categories_from_labelmap(
input_config.label_map_path)
if FLAGS.run_once:
eval_config.max_evals = 1
graph_rewriter_fn = None
if 'graph_rewriter_config' in configs:
graph_rewriter_fn = graph_rewriter_builder.build(
configs['graph_rewriter_config'], is_training=False)
evaluator.evaluate(
create_input_dict_fn,
model_fn,
eval_config,
categories,
FLAGS.checkpoint_dir,
FLAGS.eval_dir,
graph_hook_fn=graph_rewriter_fn)
if __name__ == '__main__':
tf.app.run()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/legacy/eval.py | eval.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.trainer."""
import unittest
import tensorflow.compat.v1 as tf
import tf_slim as slim
from google.protobuf import text_format
from object_detection.core import losses
from object_detection.core import model
from object_detection.core import standard_fields as fields
from object_detection.legacy import trainer
from object_detection.protos import train_pb2
from object_detection.utils import tf_version
NUMBER_OF_CLASSES = 2
def get_input_function():
"""A function to get test inputs. Returns an image with one box."""
image = tf.random_uniform([32, 32, 3], dtype=tf.float32)
key = tf.constant('image_000000')
class_label = tf.random_uniform(
[1], minval=0, maxval=NUMBER_OF_CLASSES, dtype=tf.int32)
box_label = tf.random_uniform(
[1, 4], minval=0.4, maxval=0.6, dtype=tf.float32)
multiclass_scores = tf.random_uniform(
[1, NUMBER_OF_CLASSES], minval=0.4, maxval=0.6, dtype=tf.float32)
return {
fields.InputDataFields.image: image,
fields.InputDataFields.key: key,
fields.InputDataFields.groundtruth_classes: class_label,
fields.InputDataFields.groundtruth_boxes: box_label,
fields.InputDataFields.multiclass_scores: multiclass_scores
}
class FakeDetectionModel(model.DetectionModel):
"""A simple (and poor) DetectionModel for use in test."""
def __init__(self):
super(FakeDetectionModel, self).__init__(num_classes=NUMBER_OF_CLASSES)
self._classification_loss = losses.WeightedSigmoidClassificationLoss()
self._localization_loss = losses.WeightedSmoothL1LocalizationLoss()
def preprocess(self, inputs):
"""Input preprocessing, resizes images to 28x28.
Args:
inputs: a [batch, height_in, width_in, channels] float32 tensor
representing a batch of images with values between 0 and 255.0.
Returns:
preprocessed_inputs: a [batch, 28, 28, channels] float32 tensor.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
"""
true_image_shapes = [inputs.shape[:-1].as_list()
for _ in range(inputs.shape[-1])]
return tf.image.resize_images(inputs, [28, 28]), true_image_shapes
def predict(self, preprocessed_inputs, true_image_shapes):
"""Prediction tensors from inputs tensor.
Args:
preprocessed_inputs: a [batch, 28, 28, channels] float32 tensor.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
Returns:
prediction_dict: a dictionary holding prediction tensors to be
passed to the Loss or Postprocess functions.
"""
flattened_inputs = slim.flatten(preprocessed_inputs)
class_prediction = slim.fully_connected(flattened_inputs, self._num_classes)
box_prediction = slim.fully_connected(flattened_inputs, 4)
return {
'class_predictions_with_background': tf.reshape(
class_prediction, [-1, 1, self._num_classes]),
'box_encodings': tf.reshape(box_prediction, [-1, 1, 4])
}
def postprocess(self, prediction_dict, true_image_shapes, **params):
"""Convert predicted output tensors to final detections. Unused.
Args:
prediction_dict: a dictionary holding prediction tensors.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
**params: Additional keyword arguments for specific implementations of
DetectionModel.
Returns:
detections: a dictionary with empty fields.
"""
return {
'detection_boxes': None,
'detection_scores': None,
'detection_classes': None,
'num_detections': None
}
def loss(self, prediction_dict, true_image_shapes):
"""Compute scalar loss tensors with respect to provided groundtruth.
Calling this function requires that groundtruth tensors have been
provided via the provide_groundtruth function.
Args:
prediction_dict: a dictionary holding predicted tensors
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
Returns:
a dictionary mapping strings (loss names) to scalar tensors representing
loss values.
"""
batch_reg_targets = tf.stack(
self.groundtruth_lists(fields.BoxListFields.boxes))
batch_cls_targets = tf.stack(
self.groundtruth_lists(fields.BoxListFields.classes))
weights = tf.constant(
1.0, dtype=tf.float32,
shape=[len(self.groundtruth_lists(fields.BoxListFields.boxes)), 1])
location_losses = self._localization_loss(
prediction_dict['box_encodings'], batch_reg_targets,
weights=weights)
cls_losses = self._classification_loss(
prediction_dict['class_predictions_with_background'], batch_cls_targets,
weights=weights)
loss_dict = {
'localization_loss': tf.reduce_sum(location_losses),
'classification_loss': tf.reduce_sum(cls_losses),
}
return loss_dict
def regularization_losses(self):
"""Returns a list of regularization losses for this model.
Returns a list of regularization losses for this model that the estimator
needs to use during training/optimization.
Returns:
A list of regularization loss tensors.
"""
pass
def restore_map(self, fine_tune_checkpoint_type='detection'):
"""Returns a map of variables to load from a foreign checkpoint.
Args:
fine_tune_checkpoint_type: whether to restore from a full detection
checkpoint (with compatible variable names) or to restore from a
classification checkpoint for initialization prior to training.
Valid values: `detection`, `classification`. Default 'detection'.
Returns:
A dict mapping variable names to variables.
"""
return {var.op.name: var for var in tf.global_variables()}
def restore_from_objects(self, fine_tune_checkpoint_type):
pass
def updates(self):
"""Returns a list of update operators for this model.
Returns a list of update operators for this model that must be executed at
each training step. The estimator's train op needs to have a control
dependency on these updates.
Returns:
A list of update operators.
"""
pass
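# Illustrative call order for this fake model (the shapes are arbitrary test
# values, not requirements of the class); trainer.train drives the same
# preprocess -> predict -> loss sequence through _create_losses:
#
#   model = FakeDetectionModel()
#   images = tf.random_uniform([2, 32, 32, 3])
#   preprocessed, true_shapes = model.preprocess(images)
#   prediction_dict = model.predict(preprocessed, true_shapes)
#   # loss() can be evaluated once groundtruth has been supplied via
#   # model.provide_groundtruth(...).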
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class TrainerTest(tf.test.TestCase):
def test_configure_trainer_and_train_two_steps(self):
train_config_text_proto = """
optimizer {
adam_optimizer {
learning_rate {
constant_learning_rate {
learning_rate: 0.01
}
}
}
}
data_augmentation_options {
random_adjust_brightness {
max_delta: 0.2
}
}
data_augmentation_options {
random_adjust_contrast {
min_delta: 0.7
max_delta: 1.1
}
}
num_steps: 2
"""
train_config = train_pb2.TrainConfig()
text_format.Merge(train_config_text_proto, train_config)
train_dir = self.get_temp_dir()
trainer.train(
create_tensor_dict_fn=get_input_function,
create_model_fn=FakeDetectionModel,
train_config=train_config,
master='',
task=0,
num_clones=1,
worker_replicas=1,
clone_on_cpu=True,
ps_tasks=0,
worker_job_name='worker',
is_chief=True,
train_dir=train_dir)
def test_configure_trainer_with_multiclass_scores_and_train_two_steps(self):
train_config_text_proto = """
optimizer {
adam_optimizer {
learning_rate {
constant_learning_rate {
learning_rate: 0.01
}
}
}
}
data_augmentation_options {
random_adjust_brightness {
max_delta: 0.2
}
}
data_augmentation_options {
random_adjust_contrast {
min_delta: 0.7
max_delta: 1.1
}
}
num_steps: 2
use_multiclass_scores: true
"""
train_config = train_pb2.TrainConfig()
text_format.Merge(train_config_text_proto, train_config)
train_dir = self.get_temp_dir()
trainer.train(create_tensor_dict_fn=get_input_function,
create_model_fn=FakeDetectionModel,
train_config=train_config,
master='',
task=0,
num_clones=1,
worker_replicas=1,
clone_on_cpu=True,
ps_tasks=0,
worker_job_name='worker',
is_chief=True,
train_dir=train_dir)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/legacy/trainer_tf1_test.py | trainer_tf1_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Detection model trainer.
This file provides a generic training method that can be used to train a
DetectionModel.
"""
import functools
import tensorflow.compat.v1 as tf
import tf_slim as slim
from object_detection.builders import optimizer_builder
from object_detection.builders import preprocessor_builder
from object_detection.core import batcher
from object_detection.core import preprocessor
from object_detection.core import standard_fields as fields
from object_detection.utils import ops as util_ops
from object_detection.utils import variables_helper
from deployment import model_deploy
def create_input_queue(batch_size_per_clone, create_tensor_dict_fn,
batch_queue_capacity, num_batch_queue_threads,
prefetch_queue_capacity, data_augmentation_options):
"""Sets up reader, prefetcher and returns input queue.
Args:
batch_size_per_clone: batch size to use per clone.
create_tensor_dict_fn: function to create tensor dictionary.
batch_queue_capacity: maximum number of elements to store within a queue.
num_batch_queue_threads: number of threads to use for batching.
prefetch_queue_capacity: maximum capacity of the queue used to prefetch
assembled batches.
data_augmentation_options: a list of tuples, where each tuple contains a
data augmentation function and a dictionary containing arguments and their
values (see preprocessor.py).
Returns:
input queue: a batcher.BatchQueue object holding enqueued tensor_dicts
(which hold images, boxes and targets). To get a batch of tensor_dicts,
call input_queue.Dequeue().
"""
tensor_dict = create_tensor_dict_fn()
tensor_dict[fields.InputDataFields.image] = tf.expand_dims(
tensor_dict[fields.InputDataFields.image], 0)
images = tensor_dict[fields.InputDataFields.image]
float_images = tf.cast(images, dtype=tf.float32)
tensor_dict[fields.InputDataFields.image] = float_images
include_instance_masks = (fields.InputDataFields.groundtruth_instance_masks
in tensor_dict)
include_keypoints = (fields.InputDataFields.groundtruth_keypoints
in tensor_dict)
include_multiclass_scores = (fields.InputDataFields.multiclass_scores
in tensor_dict)
if data_augmentation_options:
tensor_dict = preprocessor.preprocess(
tensor_dict, data_augmentation_options,
func_arg_map=preprocessor.get_default_func_arg_map(
include_label_weights=True,
include_multiclass_scores=include_multiclass_scores,
include_instance_masks=include_instance_masks,
include_keypoints=include_keypoints))
input_queue = batcher.BatchQueue(
tensor_dict,
batch_size=batch_size_per_clone,
batch_queue_capacity=batch_queue_capacity,
num_batch_queue_threads=num_batch_queue_threads,
prefetch_queue_capacity=prefetch_queue_capacity)
return input_queue
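# Usage sketch (illustrative values only): `my_create_tensor_dict_fn` is a
# hypothetical zero-argument function that returns a tensor_dict, e.g. built
# with dataset_builder analogously to the get_next helper in eval.py; the
# queue sizes mirror typical train_pb2.TrainConfig values but are assumptions
# here.
#
#   input_queue = create_input_queue(
#       batch_size_per_clone=8,
#       create_tensor_dict_fn=my_create_tensor_dict_fn,
#       batch_queue_capacity=150,
#       num_batch_queue_threads=8,
#       prefetch_queue_capacity=10,
#       data_augmentation_options=[])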
def get_inputs(input_queue,
num_classes,
merge_multiple_label_boxes=False,
use_multiclass_scores=False):
"""Dequeues batch and constructs inputs to object detection model.
Args:
input_queue: BatchQueue object holding enqueued tensor_dicts.
num_classes: Number of classes.
merge_multiple_label_boxes: Whether to merge boxes with multiple labels
or not. Defaults to false. Merged boxes are represented with a single
box and a k-hot encoding of the multiple labels associated with the
boxes.
use_multiclass_scores: Whether to use multiclass scores instead of
groundtruth_classes.
Returns:
    images: a list of 3-D float tensors of images.
image_keys: a list of string keys for the images.
locations_list: a list of tensors of shape [num_boxes, 4]
containing the corners of the groundtruth boxes.
classes_list: a list of padded one-hot (or K-hot) float32 tensors containing
target classes.
masks_list: a list of 3-D float tensors of shape [num_boxes, image_height,
image_width] containing instance masks for objects if present in the
input_queue. Else returns None.
keypoints_list: a list of 3-D float tensors of shape [num_boxes,
num_keypoints, 2] containing keypoints for objects if present in the
input queue. Else returns None.
    weights_list: a list of 1-D float32 tensors of shape [num_boxes]
containing groundtruth weight for each box.
"""
read_data_list = input_queue.dequeue()
label_id_offset = 1
def extract_images_and_targets(read_data):
"""Extract images and targets from the input dict."""
image = read_data[fields.InputDataFields.image]
key = ''
if fields.InputDataFields.source_id in read_data:
key = read_data[fields.InputDataFields.source_id]
location_gt = read_data[fields.InputDataFields.groundtruth_boxes]
classes_gt = tf.cast(read_data[fields.InputDataFields.groundtruth_classes],
tf.int32)
classes_gt -= label_id_offset
if merge_multiple_label_boxes and use_multiclass_scores:
raise ValueError(
          'Using both merge_multiple_label_boxes and use_multiclass_scores is '
'not supported'
)
if merge_multiple_label_boxes:
location_gt, classes_gt, _ = util_ops.merge_boxes_with_multiple_labels(
location_gt, classes_gt, num_classes)
classes_gt = tf.cast(classes_gt, tf.float32)
elif use_multiclass_scores:
classes_gt = tf.cast(read_data[fields.InputDataFields.multiclass_scores],
tf.float32)
else:
classes_gt = util_ops.padded_one_hot_encoding(
indices=classes_gt, depth=num_classes, left_pad=0)
masks_gt = read_data.get(fields.InputDataFields.groundtruth_instance_masks)
keypoints_gt = read_data.get(fields.InputDataFields.groundtruth_keypoints)
if (merge_multiple_label_boxes and (
masks_gt is not None or keypoints_gt is not None)):
raise NotImplementedError('Multi-label support is only for boxes.')
weights_gt = read_data.get(
fields.InputDataFields.groundtruth_weights)
return (image, key, location_gt, classes_gt, masks_gt, keypoints_gt,
weights_gt)
return zip(*map(extract_images_and_targets, read_data_list))
def _create_losses(input_queue, create_model_fn, train_config):
"""Creates loss function for a DetectionModel.
Args:
input_queue: BatchQueue object holding enqueued tensor_dicts.
create_model_fn: A function to create the DetectionModel.
train_config: a train_pb2.TrainConfig protobuf.
"""
detection_model = create_model_fn()
(images, _, groundtruth_boxes_list, groundtruth_classes_list,
groundtruth_masks_list, groundtruth_keypoints_list,
groundtruth_weights_list) = get_inputs(
input_queue,
detection_model.num_classes,
train_config.merge_multiple_label_boxes,
train_config.use_multiclass_scores)
preprocessed_images = []
true_image_shapes = []
for image in images:
resized_image, true_image_shape = detection_model.preprocess(image)
preprocessed_images.append(resized_image)
true_image_shapes.append(true_image_shape)
images = tf.concat(preprocessed_images, 0)
true_image_shapes = tf.concat(true_image_shapes, 0)
if any(mask is None for mask in groundtruth_masks_list):
groundtruth_masks_list = None
if any(keypoints is None for keypoints in groundtruth_keypoints_list):
groundtruth_keypoints_list = None
detection_model.provide_groundtruth(
groundtruth_boxes_list,
groundtruth_classes_list,
groundtruth_masks_list,
groundtruth_keypoints_list,
groundtruth_weights_list=groundtruth_weights_list)
prediction_dict = detection_model.predict(images, true_image_shapes)
losses_dict = detection_model.loss(prediction_dict, true_image_shapes)
for loss_tensor in losses_dict.values():
tf.losses.add_loss(loss_tensor)
def train(create_tensor_dict_fn,
create_model_fn,
train_config,
master,
task,
num_clones,
worker_replicas,
clone_on_cpu,
ps_tasks,
worker_job_name,
is_chief,
train_dir,
graph_hook_fn=None):
"""Training function for detection models.
Args:
create_tensor_dict_fn: a function to create a tensor input dictionary.
create_model_fn: a function that creates a DetectionModel and generates
losses.
train_config: a train_pb2.TrainConfig protobuf.
master: BNS name of the TensorFlow master to use.
task: The task id of this training instance.
num_clones: The number of clones to run per machine.
worker_replicas: The number of work replicas to train with.
clone_on_cpu: True if clones should be forced to run on CPU.
ps_tasks: Number of parameter server tasks.
worker_job_name: Name of the worker job.
is_chief: Whether this replica is the chief replica.
train_dir: Directory to write checkpoints and training summaries to.
graph_hook_fn: Optional function that is called after the inference graph is
built (before optimization). This is helpful to perform additional changes
to the training graph such as adding FakeQuant ops. The function should
modify the default graph.
Raises:
ValueError: If both num_clones > 1 and train_config.sync_replicas is true.
"""
detection_model = create_model_fn()
data_augmentation_options = [
preprocessor_builder.build(step)
for step in train_config.data_augmentation_options]
with tf.Graph().as_default():
# Build a configuration specifying multi-GPU and multi-replicas.
deploy_config = model_deploy.DeploymentConfig(
num_clones=num_clones,
clone_on_cpu=clone_on_cpu,
replica_id=task,
num_replicas=worker_replicas,
num_ps_tasks=ps_tasks,
worker_job_name=worker_job_name)
# Place the global step on the device storing the variables.
with tf.device(deploy_config.variables_device()):
global_step = slim.create_global_step()
if num_clones != 1 and train_config.sync_replicas:
      raise ValueError('In Synchronous SGD mode num_clones must '
                       'be 1. Found num_clones: {}'.format(num_clones))
batch_size = train_config.batch_size // num_clones
if train_config.sync_replicas:
batch_size //= train_config.replicas_to_aggregate
with tf.device(deploy_config.inputs_device()):
input_queue = create_input_queue(
batch_size, create_tensor_dict_fn,
train_config.batch_queue_capacity,
train_config.num_batch_queue_threads,
train_config.prefetch_queue_capacity, data_augmentation_options)
# Gather initial summaries.
# TODO(rathodv): See if summaries can be added/extracted from global tf
# collections so that they don't have to be passed around.
summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))
global_summaries = set([])
model_fn = functools.partial(_create_losses,
create_model_fn=create_model_fn,
train_config=train_config)
clones = model_deploy.create_clones(deploy_config, model_fn, [input_queue])
first_clone_scope = clones[0].scope
if graph_hook_fn:
with tf.device(deploy_config.variables_device()):
graph_hook_fn()
# Gather update_ops from the first clone. These contain, for example,
# the updates for the batch_norm variables created by model_fn.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, first_clone_scope)
with tf.device(deploy_config.optimizer_device()):
training_optimizer, optimizer_summary_vars = optimizer_builder.build(
train_config.optimizer)
for var in optimizer_summary_vars:
tf.summary.scalar(var.op.name, var, family='LearningRate')
sync_optimizer = None
if train_config.sync_replicas:
training_optimizer = tf.train.SyncReplicasOptimizer(
training_optimizer,
replicas_to_aggregate=train_config.replicas_to_aggregate,
total_num_replicas=worker_replicas)
sync_optimizer = training_optimizer
with tf.device(deploy_config.optimizer_device()):
regularization_losses = (None if train_config.add_regularization_loss
else [])
total_loss, grads_and_vars = model_deploy.optimize_clones(
clones, training_optimizer,
regularization_losses=regularization_losses)
total_loss = tf.check_numerics(total_loss, 'LossTensor is inf or nan.')
# Optionally multiply bias gradients by train_config.bias_grad_multiplier.
if train_config.bias_grad_multiplier:
biases_regex_list = ['.*/biases']
grads_and_vars = variables_helper.multiply_gradients_matching_regex(
grads_and_vars,
biases_regex_list,
multiplier=train_config.bias_grad_multiplier)
# Optionally freeze some layers by setting their gradients to be zero.
if train_config.freeze_variables:
grads_and_vars = variables_helper.freeze_gradients_matching_regex(
grads_and_vars, train_config.freeze_variables)
# Optionally clip gradients
if train_config.gradient_clipping_by_norm > 0:
with tf.name_scope('clip_grads'):
grads_and_vars = slim.learning.clip_gradient_norms(
grads_and_vars, train_config.gradient_clipping_by_norm)
# Create gradient updates.
grad_updates = training_optimizer.apply_gradients(grads_and_vars,
global_step=global_step)
update_ops.append(grad_updates)
update_op = tf.group(*update_ops, name='update_barrier')
with tf.control_dependencies([update_op]):
train_tensor = tf.identity(total_loss, name='train_op')
# Add summaries.
for model_var in slim.get_model_variables():
global_summaries.add(tf.summary.histogram('ModelVars/' +
model_var.op.name, model_var))
for loss_tensor in tf.losses.get_losses():
global_summaries.add(tf.summary.scalar('Losses/' + loss_tensor.op.name,
loss_tensor))
global_summaries.add(
tf.summary.scalar('Losses/TotalLoss', tf.losses.get_total_loss()))
# Add the summaries from the first clone. These contain the summaries
# created by model_fn and either optimize_clones() or _gather_clone_loss().
summaries |= set(tf.get_collection(tf.GraphKeys.SUMMARIES,
first_clone_scope))
summaries |= global_summaries
# Merge all summaries together.
summary_op = tf.summary.merge(list(summaries), name='summary_op')
# Soft placement allows placing on CPU ops without GPU implementation.
session_config = tf.ConfigProto(allow_soft_placement=True,
log_device_placement=False)
# Save checkpoints regularly.
keep_checkpoint_every_n_hours = train_config.keep_checkpoint_every_n_hours
saver = tf.train.Saver(
keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours)
# Create ops required to initialize the model from a given checkpoint.
init_fn = None
if train_config.fine_tune_checkpoint:
if not train_config.fine_tune_checkpoint_type:
# train_config.from_detection_checkpoint field is deprecated. For
# backward compatibility, fine_tune_checkpoint_type is set based on
# from_detection_checkpoint.
if train_config.from_detection_checkpoint:
train_config.fine_tune_checkpoint_type = 'detection'
else:
train_config.fine_tune_checkpoint_type = 'classification'
var_map = detection_model.restore_map(
fine_tune_checkpoint_type=train_config.fine_tune_checkpoint_type,
load_all_detection_checkpoint_vars=(
train_config.load_all_detection_checkpoint_vars))
available_var_map = (variables_helper.
get_variables_available_in_checkpoint(
var_map, train_config.fine_tune_checkpoint,
include_global_step=False))
init_saver = tf.train.Saver(available_var_map)
def initializer_fn(sess):
init_saver.restore(sess, train_config.fine_tune_checkpoint)
init_fn = initializer_fn
slim.learning.train(
train_tensor,
logdir=train_dir,
master=master,
is_chief=is_chief,
session_config=session_config,
startup_delay_steps=train_config.startup_delay_steps,
init_fn=init_fn,
summary_op=summary_op,
number_of_steps=(
train_config.num_steps if train_config.num_steps else None),
save_summaries_secs=120,
sync_optimizer=sync_optimizer,
saver=saver)
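# Single-machine usage sketch (placeholder names; trainer_tf1_test.py in this
# package exercises the same call path with a FakeDetectionModel and a
# two-step TrainConfig):
#
#   train(create_tensor_dict_fn=my_input_fn,
#         create_model_fn=my_model_fn,
#         train_config=train_config,
#         master='',
#         task=0,
#         num_clones=1,
#         worker_replicas=1,
#         clone_on_cpu=False,
#         ps_tasks=0,
#         worker_job_name='worker',
#         is_chief=True,
#         train_dir='/path/to/train_dir')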
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/legacy/trainer.py | trainer.py |