#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: liang kang
@contact: gangkanli1219@gmail.com
@time: 12/28/17 3:59 PM
@desc: load crowd counting data used for model
"""
import os

import tensorflow as tf


def generate_gaussian_kernel(shape, sigma):
    """
    Build a normalized 2-D Gaussian kernel (zero mean) as the outer
    product of two 1-D Gaussian profiles.

    Parameters
    ----------
    shape: (rows, cols) size of the kernel to generate
    sigma: standard deviation of the Gaussian

    Returns
    -------
    A float32 tensor of shape `shape` whose entries sum to 1.
    """
    rows, cols = shape
    denom = 2 * sigma ** 2

    def _gaussian_1d(size):
        # Sample `size` points on [0, size], centred around size / 2.
        coords = tf.linspace(tf.to_float(0), tf.to_float(size), size)
        centered = coords - tf.to_float(size / 2)
        return tf.exp(-(centered ** 2) / denom)

    row_profile = tf.reshape(_gaussian_1d(rows), [rows, 1])
    col_profile = tf.reshape(_gaussian_1d(cols), [1, cols])
    kernel2d = row_profile * col_profile
    # Normalize so the kernel contributes exactly unit mass.
    return kernel2d / tf.reduce_sum(kernel2d)


def get_density_map(shape, points, kernel_size=15, sigma=4.0):
    """
    create a density map for crowd counting

    Each point contributes one Gaussian kernel placed at its location;
    near the map borders the kernel is shrunk (kept odd-sized and
    centred) so it never spills outside the map.

    Parameters
    ----------
    shape: shape of density map; only shape[0] (height) and shape[1]
        (width) are used
    points: points of human, one (row, col) pair per person; the
        coordinates appear to be normalized to [0, 1) since they are
        multiplied by the map size below -- TODO confirm with the
        writer of the .record files
    kernel_size: base size of the gaussian filter; the unclipped window
        spans kernel_size // 2 * 2 + 1 pixels
    sigma: standard deviation of the gaussian filter

    Returns
    -------
    A float32 tensor of shape shape[:2]. Each kernel is normalized to
    sum to 1 (see generate_gaussian_kernel), so the map's total mass
    equals the number of points.
    """
    half_ks = kernel_size // 2

    def _density_map(point, density_map):
        # Map the point to integer pixel coordinates, clamped into
        # [0, shape - 1].
        h = tf.minimum(shape[0] - 1,
                       tf.maximum(0, tf.to_int32(tf.round(point[0] * shape[0]))))
        w = tf.minimum(shape[1] - 1,
                       tf.maximum(0, tf.to_int32(tf.round(point[1] * shape[1]))))
        # Effective odd kernel extent per axis, shrunk so a kernel
        # centred at (h, w) stays fully inside the map.
        ks_h = tf.minimum(h, tf.minimum(half_ks, shape[0] - h - 1)) * 2 + 1
        ks_w = tf.minimum(w, tf.minimum(half_ks, shape[1] - w - 1)) * 2 + 1
        # Zero padding [[top, bottom], [left, right]] that embeds the
        # ks_h x ks_w kernel at (h, w) inside a full-size map; each
        # pair sums with the kernel extent to the full map size.
        pad = tf.concat([tf.reshape(h - ks_h // 2, (1,)), tf.reshape(shape[0] - 1 - h - ks_h // 2, (1,)),
                         tf.reshape(w - ks_w // 2, (1,)), tf.reshape(shape[1] - 1 - w - ks_w // 2, (1,))],
                        axis=0)
        pad = tf.reshape(pad, (2, 2))
        kernel = tf.pad(generate_gaussian_kernel((ks_h, ks_w), sigma), pad)
        return density_map + kernel

    # Fold over the points, accumulating one padded kernel per point
    # onto an initially empty map.
    res_kernel = tf.zeros(shape[:2], tf.float32)
    res_kernel = tf.foldl(lambda a, x: _density_map(x, a), points, res_kernel)
    return res_kernel


def mean_image_subtraction(image, means):
    """
    Subtracts the given means from each image channel.

    For example:
      means = [123.68, 116.779, 103.939]
      image = mean_image_subtraction(image, means)

    Note that the rank of `image` must be known.

    Parameters
    ----------
    image: a tensor of size [height, width, C].
    means: a C-vector of values to subtract from each channel.

    Returns
    -------
    the centered image.

    Raises
    ------
    ValueError: if the rank of `image` is not 3, or if the length of
        `means` does not match the number of channels.
    """
    # Fail early with a clear message instead of an obscure error inside
    # the per-channel loop (same checks as the upstream TF-Slim helper
    # this mirrors).
    if image.get_shape().ndims != 3:
        raise ValueError('Input must be of size [height, width, C>0]')
    num_channels = image.get_shape().as_list()[-1]
    if len(means) != num_channels:
        raise ValueError('len(means) must match the number of channels')
    channels = tf.split(axis=2, num_or_size_splits=num_channels, value=image)
    for i in range(num_channels):
        channels[i] -= means[i]
    return tf.concat(axis=2, values=channels)


def file_names(is_training, data_dir, prefix, postfix):
    """
    Build the list of TFRecord file paths for the requested split.

    Parameters
    ----------
    is_training: True selects the training split, False the validation one
    data_dir: dataset path
    prefix: name prefix of the record file
    postfix: dataset tag

    Returns
    -------
    A single-element list holding the path of the split's .record file.
    """
    split = 'train' if is_training else 'val'
    return [os.path.join(data_dir, '{}_{}_{}.record'.format(prefix, split, postfix))]


def record_parser(value, class_num, means=None):
    """
    Parse one serialized tf.Example data record from `value`.

    Parameters
    ----------
    value: a serialized data record (tf.Example proto string)
    class_num: class number; the label is one-hot encoded over
        class_num + 1 entries
    means: optional per-channel means subtracted from the image; when
        None, no mean subtraction is performed

    Returns
    -------
    A (image, density_map, one_hot_label) tuple of tensors.
    """
    keys_to_features = {
        'image':
            tf.FixedLenFeature([], dtype=tf.string, default_value=''),
        'label':
            tf.FixedLenFeature([], dtype=tf.int64, default_value=-1),
        'points':
            tf.FixedLenFeature([], dtype=tf.string, default_value=-1),
        'number':
            tf.FixedLenFeature([], dtype=tf.int64, default_value=-1)
    }
    keys_to_features['points'] = tf.FixedLenFeature([], dtype=tf.string, default_value='')
    parsed = tf.parse_single_example(value, keys_to_features)
    image = tf.image.decode_image(tf.reshape(parsed['image'], shape=[]), 3)
    size = 256
    shape = (size, size, 3)
    image = tf.cast(image, dtype=tf.float32)
    # NOTE(review): assumes the stored image is 320x320x3 before being
    # resized down to 256x256 -- confirm against the record writer.
    image = tf.reshape(image, [320, 320, 3])
    image = tf.expand_dims(image, 0)
    resize_image = tf.image.resize_bilinear(image, [size, size],
                                            align_corners=False)
    image = tf.squeeze(resize_image)
    # Bug fix: `means` defaults to None (see input_fn); subtracting only
    # when it is provided avoids a crash inside mean_image_subtraction.
    if means is not None:
        image = mean_image_subtraction(image, means)
    # Points were serialized as raw float64 pairs, `number` pairs total.
    points = tf.decode_raw(parsed['points'], tf.float64)
    number = tf.cast(tf.reshape(parsed['number'], shape=[]), dtype=tf.int32)
    points = tf.reshape(points, [number, 2])
    density_map = get_density_map(shape, points)
    label = tf.cast(tf.reshape(parsed['label'], shape=[]), dtype=tf.int32)
    return image, density_map, tf.one_hot(label, class_num + 1)


def input_fn(is_training, data_dir, batch_size,
             num_epochs=1, class_num=10, prefix='', postfix='', means=None):
    """
    Input function which provides batches for train or eval.

    Parameters
    ----------
    is_training: True builds the training pipeline (with shuffling)
    data_dir: dataset directory containing the .record files
    batch_size: number of examples per batch
    num_epochs: how many times to repeat the dataset
    class_num: class number forwarded to record_parser
    prefix: record file name prefix
    postfix: record file name tag
    means: optional per-channel means forwarded to record_parser

    Returns
    -------
    An (images, (density_map, label)) pair of batched tensors.
    """
    records = file_names(is_training, data_dir, prefix, postfix)
    dataset = tf.data.TFRecordDataset(records).repeat(num_epochs)
    if is_training:
        # Shuffle only during training. Larger buffer sizes give better
        # randomness, smaller ones better performance.
        dataset = dataset.shuffle(1024)
    parse = lambda value: record_parser(value, class_num, means)
    dataset = dataset.map(parse, num_parallel_calls=8).batch(batch_size)
    images, density_map, label = dataset.make_one_shot_iterator().get_next()
    return images, (density_map, label)


if __name__ == '__main__':
    # Smoke test: build a reference gaussian kernel and a small density
    # map on CPU and print both. (Removed an unused `ten` constant.)
    import numpy as np

    # Five head positions on a 6x6 map, normalized to [0, 1)
    # (note the duplicated point, which should add double mass).
    pos = tf.constant(np.array([[1, 2], [2, 2], [3, 4], [2, 2], [2, 4]]) / 6)
    config = tf.ConfigProto(device_count={"CPU": 1})
    with tf.Session(config=config) as sess:
        with tf.device("/cpu:0"):
            ker = get_density_map((6, 6), pos, 3)
            ker0 = generate_gaussian_kernel((5, 5), 2)
            print(sess.run(ker0))
            print(sess.run(ker))
