#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: liang kang
@contact: gangkanli1219@gmail.com
@time: 2018/1/23 15:46
@desc: 
"""
import argparse
import logging
import os
import time

import cv2
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import tensorflow as tf
from skimage import transform

from crowdcounting.data.tools import check_dat_and_img_file
from crowdcounting.data.tools import decode_dat_file
from crowdcounting.data.tools import get_density_map
from crowdcounting.test.tools import convert_density_map
from crowdcounting.test.tools import preprocess_image
from crowdcounting.test.tools import rotate_image
from utils.basic import get_file_name
from utils.list import create_file_list


def parse_args(argv=None):
    """Parse command-line options for the evaluation script.

    Parameters
    ----------
    argv : list of str, optional
        Argument vector to parse. Defaults to ``None``, in which case
        ``sys.argv[1:]`` is used — identical to the previous behaviour,
        so existing callers are unaffected. Passing an explicit list
        makes the function testable and usable programmatically.

    Returns
    -------
    argparse.Namespace
        Parsed options: ``root``, ``prefix``, ``model``, ``output``,
        ``scale`` and ``high``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--image-root', type=str, default='', dest='root',
        help='The directory where the image data is stored.'
             '保存图像与标记数据的根目录。')
    parser.add_argument(
        '--prefix', help='postfix to file', default='mtl',
        dest='prefix', type=str)
    parser.add_argument(
        '--model-dir', type=str, default='', dest='model',
        help='The directory where the pd model will be stored.'
             '保存模型的目录')
    parser.add_argument(
        '--output-root', type=str, default='', dest='output',
        help='the root for all the output.'
             '输出结果的根目录')
    parser.add_argument('--scale', type=float, default=1.0,
                        help='图像缩小系数')
    parser.add_argument('--high', type=float, default=7.0,
                        help='摄像头高度')
    return parser.parse_args(argv)


def predict_image(image_list, model='',
                  sub_shape=(320, 320), input_shape=(256, 256), scale=1.0,
                  mask_path='../../data/mask.npy'):
    """
    Predict the crowd density map and head count for each listed image.

    Each entry of ``image_list`` is a single string of the form
    ``'<annotation-path>&!&<image-path>'``. For every image the function
    rotates the frame, runs the exported model on fixed sub-patches,
    rotates the per-patch density maps back and stitches them together,
    multiplies the result by a per-pixel correction mask, logs evaluation
    statistics (MAE/MSE/accuracy/recall) and saves a side-by-side
    visualisation (image / ground-truth heatmap / predicted heatmap).

    NOTE(review): the save path and log output rely on the module-level
    global ``PARAMS`` set in ``__main__`` — this function is not
    self-contained.

    Parameters
    ----------
    image_list : list of str
        ``'&!&'``-joined annotation/image path pairs.
    model : str
        Directory of the TensorFlow SavedModel (loaded with the
        ``'frozen_model'`` tag and the ``'predict_images'`` signature).
    sub_shape : tuple of int
        Size of the sub-image cut from the full frame.
    input_shape : tuple of int
        Image size fed to the network.
    scale : float
        resize_shape / input_shape, in (0, 1]; controls how much each
        sub-image is shrunk relative to ``input_shape``.
    mask_path : str
        Path of the ``.npy`` per-pixel correction-factor mask
        (default keeps the previously hard-coded location).

    Returns
    -------
    None
        Results are logged and written to disk only.
    """
    start_time = time.time()

    # Pre-load the correction mask and fix the working geometry.
    mask_factor = np.load(mask_path)
    shape = (1280, 1280)
    resize_shape = (int(round(input_shape[0] * scale)), int(round(input_shape[1] * scale)))
    config = tf.ConfigProto(device_count={"CPU": 4, "GPU": 1})
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:

        # Load the SavedModel and resolve the input/output tensors from
        # its 'predict_images' signature definition.
        meta_graph_def = tf.saved_model.loader.load(sess, ['frozen_model'], model)
        signature = meta_graph_def.signature_def
        image_tensor_name = signature['predict_images'].inputs['images'].name
        pre_density_map_name = signature['predict_images'].outputs['density_map'].name
        image_tensor = sess.graph.get_tensor_by_name(image_tensor_name)
        density_map_tensor = sess.graph.get_tensor_by_name(pre_density_map_name)

        stage_time = time.time()
        logging.info('Time: {:.3f}s, loading model completed !'.format(stage_time - start_time))
        start_time = stage_time

        # Running statistics over the whole list.
        mae, mse = 0.0, 0.0
        gt_total, pre_total = 0.0, 0.0

        # Run inference on the GPU.
        with tf.device("/gpu:0"):
            for idx, paths in enumerate(image_list):
                ann_path, img_path = paths.split('&!&')

                # Read the image (BGR -> RGB) and normalise to 1280x1280.
                img = cv2.imread(img_path)
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                img_shape = img.shape
                if not (img_shape[0] == img_shape[1] == 1280):
                    img = cv2.resize(img, shape)

                # Accumulator for the stitched density map.
                mask = np.zeros(shape).astype(np.float32)

                # Outer ring: rotate the full frame in 30-degree steps and
                # predict the top and bottom patches, then rotate each
                # partial map back and accumulate it.
                for angle in range(0, 180, 30):
                    offset, img_buf = rotate_image(angle, img)
                    img1 = np.expand_dims(preprocess_image(img_buf[40:360, 480:800, :],
                                                           shape=input_shape, resize_shape=resize_shape), axis=0)
                    img2 = np.expand_dims(preprocess_image(img_buf[920:1240, 480:800, :],
                                                           shape=input_shape, resize_shape=resize_shape), axis=0)
                    density_map1 = sess.run([density_map_tensor], feed_dict={image_tensor: img1})
                    density_map2 = sess.run([density_map_tensor], feed_dict={image_tensor: img2})
                    buffer = np.zeros(shape)
                    buffer[40:360, 480:800] = convert_density_map(np.squeeze(density_map1),
                                                                  shape=sub_shape, resize_shape=resize_shape)
                    buffer[920:1240, 480:800] = convert_density_map(np.squeeze(density_map2),
                                                                    shape=sub_shape, resize_shape=resize_shape)
                    if 0 != angle:
                        # Undo the rotation and crop back to the frame size.
                        buffer = transform.rotate(buffer, -angle, resize=True, preserve_range=True)
                        buffer = buffer[offset[0]:(offset[0] + shape[0]), offset[1]:(offset[1] + shape[1])]
                    mask += buffer

                # Centre region: same rotate/predict/unrotate scheme on the
                # 720x720 crop in the middle of the frame, 72-degree steps.
                small_shape = (720, 720)
                small_img = img[280:1000, 280:1000, :]
                for angle in range(0, 360, 72):
                    offset, small_img_buf = rotate_image(angle, small_img)
                    img_patch = np.expand_dims(preprocess_image(small_img_buf[80:400, 240:560, :],
                                                                shape=input_shape, resize_shape=resize_shape), axis=0)
                    density_map = sess.run([density_map_tensor], feed_dict={image_tensor: img_patch})
                    buffer = np.zeros(small_shape)
                    buffer[80:400, 240:560] = convert_density_map(np.squeeze(density_map),
                                                                  shape=sub_shape, resize_shape=resize_shape)
                    if 0 != angle:
                        buffer = transform.rotate(buffer, -angle, resize=True, preserve_range=True)
                        buffer = buffer[offset[0]:(offset[0] + small_shape[0]), offset[1]:(offset[1] + small_shape[1])]
                    mask[280:1000, 280:1000] += buffer

                # Multiply each cell by its correction factor to obtain the
                # final predicted density map.
                mask *= mask_factor

                # Accumulate the evaluation statistics.
                points = decode_dat_file(ann_path)
                gt_number = len(points)
                pre_number = np.sum(mask)
                logging.info('gt_number: {}, pre_number: {}'.format(gt_number, pre_number))
                gt_total += gt_number
                pre_total += pre_number
                mae += abs(gt_number - pre_number)
                mse += (gt_number - pre_number) ** 2

                # Visualisation: predicted density map next to the ground
                # truth. Annotation points are rescaled from the original
                # image size to the 1280x1280 working size (x/y swapped to
                # row/col order for get_density_map).
                points = list(map(lambda x: [int(round(shape[1] * x[1] / img_shape[1])),
                                             int(round(shape[0] * x[0] / img_shape[0]))], points))
                gt_density_map = get_density_map(shape, points)
                mask = cv2.GaussianBlur(mask, (15, 15), 0.5)
                # BUG FIX: the original called plt.title(img_path) here,
                # before plt.figure(); that implicitly created one extra
                # figure per image that was never closed (memory leak), and
                # its title was immediately superseded below.
                plt.figure(figsize=(34, 10), dpi=200)
                plt.subplot(131)
                plt.title(img_path)
                plt.imshow(img)
                plt.subplot(132)
                plt.title('ground truth: {}'.format(gt_number))
                sns.heatmap(gt_density_map * 6, vmin=0,
                            vmax=np.amax(gt_density_map), cbar=False)
                plt.subplot(133)
                plt.title('prediction: {}'.format(pre_number))
                mask = cv2.GaussianBlur(mask, (15, 15), 1)
                sns.heatmap(mask * 6, vmin=0, vmax=np.amax(mask), cbar=False)
                img_name = get_file_name(img_path)
                plt.savefig(os.path.join(PARAMS.output, '{}_{}_{}'.format(PARAMS.prefix, PARAMS.high, PARAMS.scale),
                                         img_name + '_result.jpg'))
                plt.close()

                stage_time = time.time()
                # BUG FIX: idx is 0-based; report idx + 1 so the log reads
                # "processed 1 of N" .. "processed N of N".
                logging.info('Time: {:.3f}s, processed {} of {} completed !'.format(
                    stage_time - start_time, idx + 1, len(image_list)))
                start_time = stage_time

            # Final summary metrics over the whole list.
            logging.info('MAE: {:.5f}, MSE: {:.5f}'.format(mae / len(image_list), mse / len(image_list)))
            logging.info('AVE: {:.5f}'.format(gt_total / len(image_list)))
            logging.info('accuracy: {:.5f} ----- 1 - MAE/AVE'.format(1.0 - mae / gt_total))
            logging.info('gt_total: {}, pre_total: {:.5f}'.format(gt_total, pre_total))
            logging.info('recall: {:.5f} ----- 1 - abs(gt_total - pre_total) / gt_total'.format(
                1 - abs(gt_total - pre_total) / gt_total))
            return


if __name__ == '__main__':
    PARAMS = parse_args()
    # All artefacts of one run share the same prefix_high_scale tag.
    run_tag = '{}_{}_{}'.format(PARAMS.prefix, PARAMS.high, PARAMS.scale)
    logging.basicConfig(
        filename=os.path.join(PARAMS.output, run_tag + '_result.log'),
        level=logging.INFO)
    # Per-run directory for the rendered result images.
    result_dir = os.path.join(PARAMS.output, run_tag)
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)
    # Build the '&!&'-joined annotation/image pair list, shuffle it and
    # evaluate the model on a random sample of 20 images.
    file_list, _ = create_file_list(PARAMS.root,
                                    filtering=check_dat_and_img_file,
                                    params='&!&')
    np.random.shuffle(file_list)
    predict_image(file_list[:20], PARAMS.model, scale=PARAMS.scale)
