#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: liang kang
@contact: gangkanli1219@gmail.com
@time: 1/3/18 5:18 PM
@desc: trying to speed up predicting
"""
import argparse
import logging
import os
import time

import cv2
import matplotlib.pylab as plt
import numpy as np
import tensorflow as tf
from object_detection.utils import visualization_utils as vis_utils
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score

from crowdcounting.test.tools import rotate_image
from utils.basic import get_file_name
from utils.detector import load_model
from utils.detector import run_detection
from utils.evaluator import convert_region_box_to_global
from utils.evaluator import eval_detect_result
from utils.evaluator import merge_region_prediction
from utils.evaluator import read_xml_as_eval_info
from utils.evaluator import revolve_boxes
from utils.evaluator import get_revolve_info


def parse_args():
    """Parse this script's command-line options and return the namespace.

    Options: image/annotation root, the two model checkpoints (edge and
    center), and the output root for rendered predictions.
    """
    # (flag, dest, help) table — every option is a string with '' default.
    arg_specs = (
        ('--image-root', 'root',
         'The directory where the image data and the annotation is stored.'
         '保存图像与标记数据的根目录, 图像与其相应标记应当在同一级目录。'),
        ('--model-edge', 'model_edge', '保存模型的目录edge'),
        ('--model-center', 'model_center', '保存模型的目录center'),
        ('--output-root', 'output',
         'the root for all the output.'
         '输出结果的根目录'),
    )
    parser = argparse.ArgumentParser()
    for flag, dest, help_text in arg_specs:
        parser.add_argument(flag, type=str, default='', dest=dest, help=help_text)
    return parser.parse_args()


def get_edge_boxes_classes_scores(boxes, classes, scores, config, detection_graph, image):
    """Detect objects along the edge ring of *image*.

    The image is rotated in 30-degree steps; for each rotation the detector
    runs on a top strip and a bottom strip, and every detection is mapped
    back to the coordinate frame of the original (un-rotated) image.
    Results are appended in place to *boxes*, *classes* and *scores*.
    """
    frame_shape = (1280, 1280)  # assumes a 1280x1280 input frame — crop offsets below depend on it
    with detection_graph.as_default(), \
            tf.Session(graph=detection_graph, config=config) as sess, \
            tf.device("/gpu:0"):
        for angle in range(0, 180, 30):
            _, rotated = rotate_image(angle, image)
            top_crop = rotated[40:400, 460:820, :]
            # The bottom strip is flipped on both axes so it presents the
            # same orientation as the top strip; its boxes are un-flipped
            # right after detection.
            bottom_crop = np.flip(np.flip(rotated[880:1240, 460:820, :], axis=0), axis=1)
            top_batch = np.expand_dims(top_crop, axis=0).astype(np.uint8)
            bottom_batch = np.expand_dims(bottom_crop, axis=0).astype(np.uint8)
            t_boxes, t_classes, t_scores = run_detection(sess, detection_graph, top_batch)
            b_boxes, b_classes, b_scores = run_detection(sess, detection_graph, bottom_batch)
            # Undo the double flip on normalized boxes:
            # [ymin, xmin, ymax, xmax] -> [1-ymax, 1-xmax, 1-ymin, 1-xmin].
            b_boxes = np.vstack((1.0 - b_boxes[:, 2], 1.0 - b_boxes[:, 3],
                                 1.0 - b_boxes[:, 0], 1.0 - b_boxes[:, 1])).T
            t_boxes, t_classes, t_scores = convert_region_box_to_global(
                {'shape': frame_shape, 'crop_shape': (360, 360)},
                t_boxes, t_classes, t_scores, '460_40')
            b_boxes, b_classes, b_scores = convert_region_box_to_global(
                {'shape': frame_shape, 'crop_shape': (360, 360)},
                b_boxes, b_classes, b_scores, '460_880')
            angle_boxes = t_boxes + b_boxes
            angle_classes = t_classes + b_classes
            angle_scores = t_scores + b_scores
            if angle != 0:
                # Rotate the detections back into the original image frame.
                revolve_mat, post_shape, offset = get_revolve_info(angle, frame_shape)
                angle_boxes = revolve_boxes(
                    np.asarray(angle_boxes), revolve_mat, frame_shape, post_shape)
                angle_boxes, angle_classes, angle_scores = convert_region_box_to_global(
                    {'shape': frame_shape, 'crop_shape': post_shape},
                    angle_boxes, angle_classes, angle_scores,
                    '{}_{}'.format(*offset), reverse=True)
            boxes.extend(angle_boxes)
            scores.extend(angle_scores)
            classes.extend(angle_classes)


def get_center_boxes_classes_scores(boxes, classes, scores, config, detection_graph, image):
    """Detect objects in the center of *image* using four 400x400 crops.

    Each crop's detections are converted to the coordinate frame of the full
    1280x1280 image and appended in place to *boxes*, *classes* and *scores*.
    """
    with detection_graph.as_default(), \
            tf.Session(graph=detection_graph, config=config) as sess, \
            tf.device("/gpu:0"):
        # (row, col) top-left corners of the four overlapping center crops.
        for top, left in ((360, 360), (520, 360), (360, 520), (520, 520)):
            crop = image[top:top + 400, left:left + 400, :]
            batch = np.expand_dims(crop, axis=0).astype(np.uint8)
            crop_boxes, crop_classes, crop_scores = run_detection(sess, detection_graph, batch)
            # Offset key is '<x>_<y>' (column first), matching the edge path.
            crop_boxes, crop_classes, crop_scores = convert_region_box_to_global(
                {'shape': (1280, 1280), 'crop_shape': (400, 400)},
                crop_boxes, crop_classes, crop_scores, '{}_{}'.format(left, top))
            boxes.extend(crop_boxes)
            scores.extend(crop_scores)
            classes.extend(crop_classes)


def _filter_by_score(boxes, classes, scores, threshold):
    """Keep only detections whose score is >= *threshold*; returns three lists."""
    kept = [(b, c, s) for b, c, s in zip(boxes, classes, scores) if s >= threshold]
    if not kept:
        return [], [], []
    kept_boxes, kept_classes, kept_scores = zip(*kept)
    return list(kept_boxes), list(kept_classes), list(kept_scores)


def _read_ground_truth(xml_path, label_list):
    """Load annotation XML and return (gt_boxes, gt_classes) as plain lists.

    The empty-annotation case is checked BEFORE any array surgery (the
    original split the array first and discarded the result when empty).
    """
    objects = read_xml_as_eval_info(xml_path, label_list)['objects']
    if len(objects) == 0:
        return [], []
    gt_classes, gt_boxes = np.hsplit(np.array(objects), [1])
    gt_classes = list(gt_classes.flatten())
    # Reorder box columns to the [ymin, xmin, ymax, xmax] layout the
    # evaluator expects — presumably the XML stores [x, y, x, y]; this
    # matches the original code's (1, 0, 3, 2) permutation.
    gt_boxes = gt_boxes[:, (1, 0, 3, 2)]
    return [list(b) for b in gt_boxes], gt_classes


def _save_visualization(img, boxes, classes, scores, score, title, out_path):
    """Draw the surviving boxes on *img* (in place) and save the figure."""
    vis_utils.visualize_boxes_and_labels_on_image_array(
        img, np.array(boxes), np.array(classes).astype(np.uint8), np.array(scores),
        {1: {'id': 1, 'name': 'p'}},
        use_normalized_coordinates=True,
        min_score_thresh=score,
        line_thickness=3)
    plt.figure(figsize=(10, 10), dpi=250)
    plt.title(title)
    plt.imshow(img)
    plt.savefig(out_path)
    plt.close()


def _log_stage(message, start_time):
    """Log elapsed time since *start_time* plus *message*; return the new mark."""
    stage_time = time.time()
    logging.info('{} elapsed time: {:.3f}s'.format(time.ctime(),
                                                   stage_time - start_time))
    logging.info(message)
    return stage_time


def predict_image(image_path, label_list, checkpoint_center='', checkpoint_edge='',
                  score=0.5, percent=0.8, output_dir=None):
    """Run the two-model detector over every .png/.jpg in *image_path*.

    For each image: detect with the center and edge models, threshold by
    *score*, merge overlapping region predictions with *percent*, render a
    visualization, and accumulate precision/recall against the XML ground
    truth next to each image. Totals and per-image means are logged.

    Args:
        image_path: directory containing images and their .xml annotations.
        label_list: class names understood by the annotation reader.
        checkpoint_center: frozen-graph path for the center-region model.
        checkpoint_edge: frozen-graph path for the edge-region model.
        score: minimum detection score to keep.
        percent: overlap threshold passed to merge_region_prediction.
        output_dir: where visualizations are written; defaults to the
            module-level PARAMS.output for backward compatibility (the
            original read the global directly).
    """
    if output_dir is None:
        output_dir = PARAMS.output  # legacy fallback on the parsed CLI args
    start_time = time.time()
    detection_graph_center = load_model(checkpoint_center)
    detection_graph_edge = load_model(checkpoint_edge)
    config = tf.ConfigProto(device_count={"CPU": 4, "GPU": 1})
    config.gpu_options.allow_growth = True
    start_time = _log_stage('loading models completed !', start_time)

    ground_true = []
    predictions = []
    all_pic_recall = []
    all_pic_pre = []
    for idx, image_name in enumerate(os.listdir(image_path)):
        # Case-insensitive extension check (the original missed .PNG/.JPG).
        if not image_name.lower().endswith(('.png', '.jpg')):
            continue
        logging.info('predicting image: {}!'.format(os.path.join(image_path, image_name)))
        img_head_name = get_file_name(image_name)
        img = cv2.imread(os.path.join(image_path, image_name))
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        xml_path = os.path.join(image_path, img_head_name + '.xml')

        raw_boxes, raw_classes, raw_scores = [], [], []
        get_center_boxes_classes_scores(
            raw_boxes, raw_classes, raw_scores, config, detection_graph_center, img)
        get_edge_boxes_classes_scores(
            raw_boxes, raw_classes, raw_scores, config, detection_graph_edge, img)
        start_time = _log_stage('predicting image completed !', start_time)

        boxes, classes, kept_scores = _filter_by_score(
            raw_boxes, raw_classes, raw_scores, score)
        merged_boxes, merged_classes, merged_scores = merge_region_prediction(
            np.array(boxes), np.array(kept_scores), np.array(classes), percent)
        merged_boxes = np.array(merged_boxes)
        merged_classes = np.array(merged_classes).astype(np.int32)
        merged_scores = np.array(merged_scores)

        gt_boxes, gt_classes = _read_ground_truth(xml_path, label_list)

        _save_visualization(img, merged_boxes, merged_classes, merged_scores, score,
                            img_head_name,
                            os.path.join(output_dir, '{}.jpg'.format(idx)))

        # Ground-truth classes are shifted by one so class 0 can serve as
        # the "no match" default inside eval_detect_result.
        gt, pred = eval_detect_result(gt_boxes, [c + 1 for c in gt_classes],
                                      merged_boxes, merged_classes, default_class=0)
        ground_true += gt
        predictions += pred
        all_pic_recall.append(recall_score(gt, pred))
        all_pic_pre.append(precision_score(gt, pred))
        start_time = _log_stage('computing statistics completed !', start_time)

    recall = recall_score(ground_true, predictions)
    precision = precision_score(ground_true, predictions)
    logging.info('Total Recall: {:.5f}'.format(recall))
    logging.info('Total Precision: {:.5f}'.format(precision))
    logging.info('Mean Recall: {:.5f}'.format(np.mean(all_pic_recall)))
    logging.info('Mean Precision: {:.5f}'.format(np.mean(all_pic_pre)))


if __name__ == '__main__':
    # Script entry point: configure logging, parse the CLI, then run the
    # two-checkpoint (center + edge) predictor over the image root.
    PARAMS = parse_args()
    logging.basicConfig(level=logging.INFO)
    predict_image(image_path=PARAMS.root,
                  label_list=['person'],
                  checkpoint_center=PARAMS.model_center,
                  checkpoint_edge=PARAMS.model_edge,
                  score=0.5)
