#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: liang kang
@contact: gangkanli1219@gmail.com
@time: 2018/3/19 17:13
@desc: 
"""
import tensorflow as tf
from object_detection.core import standard_fields as fields
from object_detection.data_decoders.tf_example_decoder import TfExampleDecoder
from object_detection.protos import input_reader_pb2

parallel_reader = tf.contrib.slim.parallel_reader
slim_example_decoder = tf.contrib.slim.tfexample_decoder


def build(input_reader_config, decoder_fun):
    """Builds a tensor dictionary based on the InputReader config.

    Args:
      input_reader_config: A input_reader_pb2.InputReader object.
      decoder_fun: Callable that constructs the example decoder. It is
        invoked with the keyword arguments `load_instance_masks`,
        `instance_mask_type` and `label_map_proto_file`, and must return an
        object exposing a `decode(string_tensor)` method.

    Returns:
      A tensor dict based on the input_reader_config.

    Raises:
      ValueError: If the config is not an InputReader proto, if the reader
        type is unsupported, or if no input paths are specified.
    """
    if not isinstance(input_reader_config, input_reader_pb2.InputReader):
        raise ValueError('input_reader_config not of type '
                         'input_reader_pb2.InputReader.')

    # Only the tf_record_input_reader oneof variant is supported here.
    if input_reader_config.WhichOneof('input_reader') != \
            'tf_record_input_reader':
        raise ValueError('Unsupported input_reader_config.')

    tf_record_config = input_reader_config.tf_record_input_reader
    if not tf_record_config.input_path:
        raise ValueError('At least one input path must be specified in '
                         '`input_reader_config`.')

    # A num_epochs of 0 in the proto means "repeat forever" (None for slim).
    epochs = input_reader_config.num_epochs or None
    _, serialized_examples = parallel_reader.parallel_read(
        tf_record_config.input_path[:],  # RepeatedScalarContainer -> list.
        reader_class=tf.TFRecordReader,
        num_epochs=epochs,
        num_readers=input_reader_config.num_readers,
        shuffle=input_reader_config.shuffle,
        dtypes=[tf.string, tf.string],
        capacity=input_reader_config.queue_capacity,
        min_after_dequeue=input_reader_config.min_after_dequeue)

    label_map_proto_file = (
        input_reader_config.label_map_path
        if input_reader_config.HasField('label_map_path') else None)
    decoder = decoder_fun(
        load_instance_masks=input_reader_config.load_instance_masks,
        instance_mask_type=input_reader_config.mask_type,
        label_map_proto_file=label_map_proto_file)
    return decoder.decode(serialized_examples)


class DataAugmentDecoder(TfExampleDecoder):
    """TfExampleDecoder variant with an overridable data-augmentation hook.

    Subclasses override `_data_augment` to transform the decoded tensor
    dictionary in place; the hook runs before the derived bookkeeping
    fields (num_groundtruth_boxes, default groundtruth_weights) are
    computed from the dictionary.
    """

    def __init__(self,
                 load_instance_masks=False,
                 instance_mask_type=input_reader_pb2.NUMERICAL_MASKS,
                 label_map_proto_file=None,
                 use_display_name=False,
                 dct_method=''):
        # Construction is delegated entirely to the parent decoder.
        super(DataAugmentDecoder, self).__init__(
            load_instance_masks=load_instance_masks,
            instance_mask_type=instance_mask_type,
            label_map_proto_file=label_map_proto_file,
            use_display_name=use_display_name,
            dct_method=dct_method)

    def decode(self, tf_example_string_tensor):
        """Decodes a serialized tensorflow example into a tensor dictionary.

        Args:
          tf_example_string_tensor: a string tensor holding a serialized
            tensorflow example proto (reshaped to a scalar internally).

        Returns:
          A dictionary keyed by fields.InputDataFields containing, among
          others: image (uint8, [None, None, 3]), source_id, key, filename,
          groundtruth_boxes (float32, [None, 4]), groundtruth_classes,
          groundtruth_weights (float32, [None]), num_groundtruth_boxes
          (int32 scalar), groundtruth_area and groundtruth_is_crowd (bool);
          optionally groundtruth_difficult, groundtruth_group_of and
          groundtruth_instance_masks, depending on decoder configuration.
        """
        serialized = tf.reshape(tf_example_string_tensor, shape=[])
        example_decoder = slim_example_decoder.TFExampleDecoder(
            self.keys_to_features, self.items_to_handlers)
        item_names = example_decoder.list_items()
        decoded = example_decoder.decode(serialized, items=item_names)
        tensor_dict = {name: tensor
                       for name, tensor in zip(item_names, decoded)}

        # is_crowd is decoded as an integer tensor; expose it as bool.
        crowd_key = fields.InputDataFields.groundtruth_is_crowd
        tensor_dict[crowd_key] = tf.cast(tensor_dict[crowd_key],
                                         dtype=tf.bool)
        tensor_dict[fields.InputDataFields.image].set_shape([None, None, 3])

        # Augmentation hook: subclasses mutate tensor_dict in place here,
        # so the fields derived below reflect the augmented tensors.
        self._data_augment(tensor_dict)

        boxes_key = fields.InputDataFields.groundtruth_boxes
        tensor_dict[fields.InputDataFields.num_groundtruth_boxes] = tf.shape(
            tensor_dict[boxes_key])[0]

        # When the example carried no per-box weights, default every
        # groundtruth box to a weight of 1.0.
        weights_key = fields.InputDataFields.groundtruth_weights
        tensor_dict[weights_key] = tf.cond(
            tf.greater(tf.shape(tensor_dict[weights_key])[0], 0),
            lambda: tensor_dict[weights_key],
            lambda: tf.ones([tf.shape(tensor_dict[boxes_key])[0]],
                            dtype=tf.float32))
        return tensor_dict

    def _data_augment(self, tensor_dict):
        """Override this method to implement data augmentation.

        Parameters
        ----------
        tensor_dict : dict
            Decoded tensors; expected to be modified in place.

        Returns
        -------
        None

        """
        pass
