#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: liang kang
@contact: gangkanli1219@gmail.com
@time: 12/28/17 7:00 PM
@desc: define the tensorflow trainable model
"""
import tensorflow as tf

from crowdcounting.mtl.model import get_model


def compute_mse_loss(density_map, ground_true):
    """Per-image Euclidean loss between predicted and target density maps.

    For every image, the squared pixel differences are summed over the
    spatial axes and square-rooted (a per-image L2 distance); the batch
    mean of those distances is returned as a scalar loss tensor.

    Parameters
    ----------
    density_map : predicted density maps, reduced over axes 1 and 2
        (presumably shape ``(batch, H, W)`` — confirm against the network
        output).
    ground_true : ground-truth density maps, same shape.

    Returns
    -------
    Scalar tensor holding the mean per-image L2 distance.
    """
    diff = density_map - ground_true
    per_image_sq = tf.reduce_sum(tf.square(diff), axis=[1, 2])
    return tf.reduce_mean(tf.sqrt(per_image_sq))


def compute_mse(density_map, ground_true):
    """Streaming metric: RMSE of the per-image crowd counts.

    Each map is summed over its spatial axes to get a scalar count per
    image; the batch RMSE of predicted vs. true counts is then fed into
    ``tf.metrics.mean`` so it is averaged across evaluation batches.

    Note: despite the name this is a root-mean-squared error of counts,
    wrapped in a streaming mean.

    Returns
    -------
    ``(value_op, update_op)`` pair as produced by ``tf.metrics.mean``.
    """
    predicted_count = tf.reduce_sum(density_map, axis=[1, 2])
    true_count = tf.reduce_sum(ground_true, axis=[1, 2])
    batch_rmse = tf.sqrt(tf.reduce_mean((predicted_count - true_count) ** 2))
    return tf.metrics.mean(batch_rmse)


def compute_mae(density_map, ground_true):
    """Streaming metric: mean absolute error of the per-image crowd counts.

    Sums each map over its spatial axes to obtain a per-image count, takes
    the batch mean of ``|true - predicted|``, and hands it to
    ``tf.metrics.mean`` for cross-batch averaging during evaluation.

    Returns
    -------
    ``(value_op, update_op)`` pair as produced by ``tf.metrics.mean``.
    """
    predicted_count = tf.reduce_sum(density_map, axis=[1, 2])
    true_count = tf.reduce_sum(ground_true, axis=[1, 2])
    batch_mae = tf.reduce_mean(tf.abs(true_count - predicted_count))
    return tf.metrics.mean(batch_mae)


def model_fn(features, labels, mode, params):
    """
    Our model_fn for crowd counting.

    Builds the multi-task network from ``get_model`` (a density-map
    regression head plus a high-level-prior classification head) and
    returns the ``EstimatorSpec`` for the requested mode: predictions
    only for PREDICT, loss + Adam train op for TRAIN, loss + streaming
    metrics for EVAL.

    Parameters
    ----------
    features : Tensor
        Batch of input images; fed directly to the network and to an
        image summary (presumably NHWC — confirm against the input_fn).
    labels : tuple or None
        ``(density_map, one_hot_class_label)`` for TRAIN/EVAL;
        ``None`` in PREDICT mode.
    mode : tf.estimator.ModeKeys
        One of TRAIN, EVAL, PREDICT.
    params : dict
        Expects keys ``'bn'``, ``'class_num'``, ``'batch_size'`` and
        ``'train_number'``.

    Returns
    -------
    tf.estimator.EstimatorSpec
    """
    tf.identity(features, 'images')
    tf.summary.image('images', features)
    network = get_model(params['bn'], params['class_num'] + 1)
    hl_prior, de_stage = network(inputs=features, is_training=(mode == tf.estimator.ModeKeys.TRAIN))
    # NOTE(review): an axis-less squeeze drops *every* size-1 dimension,
    # so a batch of size 1 would lose its batch axis too — consider
    # tf.squeeze(de_stage, axis=-1) once the output shape is confirmed.
    de_stage = tf.squeeze(de_stage)
    predictions = {
        'classes': tf.argmax(hl_prior, axis=1),
        'probabilities': tf.nn.softmax(hl_prior, name='softmax_tensor'),
        # todo add a prediction task: predict the number of people
        # NOTE(review): this sums the density map over the whole batch,
        # not per image — verify whether a per-image count (reduce over
        # spatial axes only) is intended.
        'number': tf.to_int64(tf.reduce_sum(de_stage))
    }
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)

    # Bug fix: unpack labels only *after* the PREDICT early-return. The
    # Estimator passes labels=None in PREDICT mode, and unpacking None
    # raised a TypeError before the early return could be reached.
    density_map, label = labels

    # Classification loss on the high-level prior head.
    cross_entropy = tf.losses.softmax_cross_entropy(onehot_labels=label, logits=hl_prior)

    # Create a tensor named cross_entropy for logging purposes.
    tf.identity(cross_entropy, name='cross_entropy')
    tf.summary.scalar('cross_entropy', cross_entropy)

    # Density-map regression loss (per-image L2 distance, batch-averaged).
    mse_loss = compute_mse_loss(de_stage, density_map)
    # NOTE(review): add_loss only matters if tf.losses.get_total_loss is
    # used somewhere; the total below is assembled by hand, so this call
    # looks redundant — confirm before removing.
    tf.losses.add_loss(mse_loss)

    # Create a tensor named mse_loss for logging purposes.
    tf.identity(mse_loss, name='mse_loss')
    tf.summary.scalar('mse_loss', mse_loss)

    # Total loss: cross entropy is down-weighted so the density regression
    # dominates. (Despite the original comment, no weight decay is applied
    # here — 0.0001 is a task-balancing factor on the classification loss.)
    loss = 0.0001 * cross_entropy + mse_loss

    if mode == tf.estimator.ModeKeys.TRAIN:
        # Scale the learning rate linearly with the batch size. When the
        # batch size is 256, the learning rate should be 0.004.
        initial_learning_rate = 0.004 * params['batch_size'] / 256
        batches_per_epoch = params['train_number'] / params['batch_size']
        global_step = tf.train.get_or_create_global_step()

        # Multiply the learning rate by 0.1 at 15 and 18 epochs.
        boundaries = [int(batches_per_epoch * epoch) for epoch in [15, 18]]
        values = [initial_learning_rate * decay for decay in [1, 0.1, 0.01]]
        learning_rate = tf.train.piecewise_constant(
            tf.cast(global_step, tf.int32), boundaries, values)

        # Create a tensor named learning_rate for logging purposes.
        tf.identity(learning_rate, name='learning_rate')
        tf.summary.scalar('learning_rate', learning_rate)

        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)

        # Batch norm requires update_ops to be added as a train_op dependency.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            train_op = optimizer.minimize(loss, global_step)
    else:
        train_op = None

    # Streaming evaluation metrics: classification accuracy plus count
    # errors on the density maps.
    accuracy = tf.metrics.accuracy(
        tf.argmax(label, axis=1), predictions['classes'])
    metrics = {'accuracy': accuracy,
               'MSE': compute_mse(density_map, de_stage),
               'MAE': compute_mae(density_map, de_stage)}

    # Create a tensor named train_accuracy for logging purposes.
    tf.identity(accuracy[1], name='train_accuracy')
    tf.summary.scalar('train_accuracy', accuracy[1])

    return tf.estimator.EstimatorSpec(
        mode=mode,
        predictions=predictions,
        loss=loss,
        train_op=train_op,
        eval_metric_ops=metrics)
