#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: liang kang
@contact: gangkanli1219@gmail.com
@time: 12/28/17 10:51 AM
@desc: Implementation of CNN-based Cascaded Multi-task Learning of High-level Prior and Density
       Estimation for Crowd Counting (Sindagi et al.)
"""
import tensorflow as tf

_BATCH_NORM_DECAY = 0.997
_BATCH_NORM_EPSILON = 1e-5


def prelu(inputs, scope=None):
    """
    Parametric ReLU activation: max(0, x) + alpha * min(0, x) with a
    learnable per-channel slope ``alpha`` initialised to 0.1.

    Parameters
    ----------
    inputs: A tensor of size [batch, height_in, width_in, channels].
    scope: optional variable-scope name; defaults to "prelu".

    Returns
    -------
    A tensor of the same shape and dtype as ``inputs``.
    """
    with tf.variable_scope(name_or_scope=scope, default_name="prelu"):
        # NOTE: the slope variable is (historically) named "prelu";
        # keep that name so existing checkpoints remain loadable.
        alpha = tf.get_variable(
            "prelu",
            shape=inputs.get_shape()[-1],
            dtype=inputs.dtype,
            initializer=tf.constant_initializer(0.1))
        positive_part = tf.nn.relu(inputs)
        # (x - |x|) / 2 == min(0, x), so this is alpha * min(0, x).
        negative_part = alpha * (inputs - tf.abs(inputs)) * 0.5
        return positive_part + negative_part


def batch_norm_act(inputs, is_training, act_type='prelu', bn=False):
    """
    Performs a batch normalization followed by an activation function.

    Parameters
    ----------
    inputs: feature tensor; channels-last. May be rank 4 (NHWC, from a
        convolution) or rank 2 (batch, units, from a dense layer).
    is_training: Python bool or bool tensor; selects batch statistics
        (True) vs. moving averages (False) for batch normalization.
    act_type: 'prelu' selects the parametric ReLU; any other value
        falls back to plain ReLU.
    bn: when True, apply fused batch normalization before the
        activation.

    Returns
    -------
    The (optionally normalized) activated tensor, same shape as
    ``inputs``.
    """
    if bn:
        # BUGFIX: was axis=3, which crashes on the rank-2 tensors that
        # fc_bn_act feeds through here. axis=-1 normalizes the channel
        # dimension for both NHWC conv features and dense outputs, and
        # is identical to axis=3 for rank-4 input.
        inputs = tf.layers.batch_normalization(
            inputs=inputs, axis=-1,
            momentum=_BATCH_NORM_DECAY, epsilon=_BATCH_NORM_EPSILON, center=True,
            scale=True, training=is_training, fused=True)
    if act_type == 'prelu':
        inputs = prelu(inputs)
    else:
        inputs = tf.nn.relu(inputs)
    return inputs


def conv2d(inputs, filters, kernel_size, strides=1):
    """
    2-D convolution with 'SAME' padding, no bias term, and
    variance-scaling (He) kernel initialization.

    Parameters
    ----------
    inputs: input feature tensor, [batch, height, width, channels].
    filters: number of output channels.
    kernel_size: spatial extent of the square kernel.
    strides: convolution stride (default 1).

    Returns
    -------
    The convolved feature tensor.
    """
    kernel_init = tf.variance_scaling_initializer()
    return tf.layers.conv2d(
        inputs=inputs,
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding='SAME',
        use_bias=False,
        kernel_initializer=kernel_init)


def fc_bn_act(inputs, filters,
              is_training=True, act_type='prelu', bn=False):
    """
    Fully connected layer followed by optional batch normalization and
    an activation.

    Parameters
    ----------
    inputs: rank-2 input tensor, [batch, features].
    filters: number of output units of the dense layer.
    is_training: forwarded to batch normalization.
    act_type: 'prelu' or anything else for plain ReLU.
    bn: whether to apply batch normalization before the activation.

    Returns
    -------
    The activated dense-layer output, [batch, filters].
    """
    dense_out = tf.layers.dense(inputs, filters)
    return batch_norm_act(dense_out, is_training, act_type, bn)


def conv_bn_act(inputs, filters, kernel_size,
                strides=1, is_training=True, act_type='prelu', bn=False):
    """
    Convolution followed by optional batch normalization and an
    activation.

    Parameters
    ----------
    inputs: input feature tensor, [batch, height, width, channels].
    filters: number of output channels.
    kernel_size: spatial extent of the square kernel.
    strides: convolution stride (default 1).
    is_training: forwarded to batch normalization.
    act_type: 'prelu' or anything else for plain ReLU.
    bn: whether to apply batch normalization before the activation.

    Returns
    -------
    The activated convolution output.
    """
    conv_out = conv2d(inputs, filters, kernel_size, strides)
    return batch_norm_act(conv_out, is_training, act_type, bn)


def get_model(bn=False, num_classes=10):
    """
    cascaded mtl model

    Builds the Cascaded MTL network (Sindagi et al.): a shared
    low-level feature extractor feeding two branches -- a high-level
    prior branch that classifies the image into ``num_classes``
    crowd-count bins, and a density-estimation branch that regresses a
    single-channel density map.

    Parameters
    ----------
    bn: performs batch normalization or not
    num_classes: number of output units of the high-level prior branch
        (one per crowd-count class).

    Returns
    -------
    _model: callable ``(inputs, is_training) -> (hl_prior, de_stage)``
        where ``hl_prior`` is the [batch, num_classes] class output
        (named 'high_level') and ``de_stage`` is the predicted density
        map (named 'density_map').
    """

    def _model(inputs, is_training):
        # Shared low-level feature extractor, used by both branches.
        inputs = tf.identity(inputs, name='input_image')
        inputs = conv_bn_act(inputs, 16, 9, is_training=is_training, bn=bn)
        inputs = conv_bn_act(inputs, 32, 7, is_training=is_training, bn=bn)

        # --- High-level prior branch: conv/pool stack, then FC head ---
        hl_prior = conv_bn_act(inputs, 16, 9, is_training=is_training, bn=bn)
        hl_prior = tf.layers.max_pooling2d(hl_prior, 2, 2)
        hl_prior = conv_bn_act(hl_prior, 32, 7, is_training=is_training, bn=bn)
        hl_prior = tf.layers.max_pooling2d(hl_prior, 2, 2)
        hl_prior = conv_bn_act(hl_prior, 16, 7, is_training=is_training, bn=bn)
        # hl_prior_1 (1/4 input resolution, 8 channels) is kept for the
        # cross-branch concat into the density branch below.
        hl_prior_1 = conv_bn_act(hl_prior, 8, 7, is_training=is_training, bn=bn)
        hl_prior = tf.layers.max_pooling2d(hl_prior_1, 2, 2)
        hl_prior = conv_bn_act(hl_prior, 4, 1, is_training=is_training, bn=bn)
        hl_prior = tf.layers.flatten(hl_prior)
        hl_prior = fc_bn_act(hl_prior, 512, is_training=is_training, bn=bn)
        # dropout uses the tf.layers default rate (0.5); active only in training
        hl_prior = tf.layers.dropout(hl_prior, training=is_training)
        hl_prior = fc_bn_act(hl_prior, 256, is_training=is_training, bn=bn)
        hl_prior = tf.layers.dropout(hl_prior, training=is_training)
        # NOTE(review): the final FC is also wrapped in an activation
        # (PReLU), so this output is activated, not raw logits --
        # confirm the loss used by the caller matches.
        hl_prior = fc_bn_act(hl_prior, num_classes, is_training=is_training, bn=bn)

        # --- Density-estimation branch ---
        de_stage = conv_bn_act(inputs, 20, 7, is_training=is_training, bn=bn)
        de_stage = tf.layers.max_pooling2d(de_stage, 2, 2)
        de_stage = conv_bn_act(de_stage, 40, 5, is_training=is_training, bn=bn)
        de_stage = tf.layers.max_pooling2d(de_stage, 2, 2)
        de_stage = conv_bn_act(de_stage, 20, 5, is_training=is_training, bn=bn)
        de_stage = conv_bn_act(de_stage, 10, 5, is_training=is_training, bn=bn)
        # Fuse the high-level prior features (both tensors are at 1/4 of
        # the input resolution) along the channel axis.
        de_stage = tf.concat([de_stage, hl_prior_1], 3)
        de_stage = conv_bn_act(de_stage, 24, 3, is_training=is_training, bn=bn)
        de_stage = conv_bn_act(de_stage, 32, 3, is_training=is_training, bn=bn)
        # Two stride-2 transposed convolutions upsample 1/4 -> full
        # input resolution.
        de_stage = tf.layers.conv2d_transpose(de_stage, 16, [4, 4], 2,
                                              padding='SAME', use_bias=False,
                                              kernel_initializer=tf.variance_scaling_initializer())
        de_stage = batch_norm_act(de_stage, is_training, bn=bn)
        de_stage = tf.layers.conv2d_transpose(de_stage, 8, [4, 4], 2,
                                              padding='SAME', use_bias=False,
                                              kernel_initializer=tf.variance_scaling_initializer())
        de_stage = batch_norm_act(de_stage, is_training, bn=bn)
        # 1x1 conv with ReLU keeps the predicted density non-negative.
        de_stage = conv_bn_act(de_stage, 1, 1, act_type='relu', is_training=is_training, bn=bn)
        hl_prior = tf.identity(hl_prior, name='high_level')
        de_stage = tf.identity(de_stage, name='density_map')
        return hl_prior, de_stage

    return _model
