# -*- coding: utf-8 -*-
# @Time    : 2018/10/24 15:28
# @Author  : cj
# @File    : predict1.0.py
# !/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: chenjun
@contact: gangkanli1219@gmail.com
@time: 1/3/18 5:18 PM
@desc: trying to speed up predicting
检测原图的程序，六个模型串行
"""
from __future__ import division

import argparse
import time

import numpy as np
import tensorflow as tf

from src.detection.objectionutils import label_map_util
from src.detection.utils.detector import run_detection
from src.detection.utils.io import get_label_from_pd_file
from src.detection.utils.io import get_label_list_from_category_index
from src.utils.basic_utils import DetectResult, DetectedBox
from src.utils.context import Context


def compute_recall_and_precision(ground_true, predictions):
    """
    Compute precision and recall for per-sample label predictions.

    Label 0 is treated as background/negative; any non-zero label is a
    positive detection.

    :param ground_true: list of ground-truth labels (0 = background)
    :param predictions: list of predicted labels (0 = background),
                        same length as ``ground_true``
    :return: ``(precision, recall)``; a component is 0.0 when its
             denominator is zero (no positive predictions / no positive
             ground truth) instead of raising ZeroDivisionError
    :raises ValueError: if the two lists differ in length
    """
    if len(ground_true) != len(predictions):
        raise ValueError("ground_true and predictions must have the same length")
    # True positives: correctly predicted *positive* labels only.
    # (The previous version also counted 0 == 0 matches, which inflated
    # tp with true negatives and could yield precision/recall > 1.)
    tp = sum(1 for g, p in zip(ground_true, predictions) if g == p and g != 0)
    predicted_pos = sum(1 for p in predictions if p != 0)
    actual_pos = sum(1 for g in ground_true if g != 0)
    precision = tp / predicted_pos if predicted_pos else 0.0
    recall = tp / actual_pos if actual_pos else 0.0
    return precision, recall


def parse_args(argv=None):
    """
    Parse command-line options for the detection script.

    :param argv: optional list of argument strings; when None (the
                 default, backward compatible) ``sys.argv[1:]`` is used.
    :return: argparse.Namespace with attributes ``root``, ``model_edge``,
             ``model_center`` and ``output`` (all default to '').
    """
    parser = argparse.ArgumentParser()
    # add_argument(): option name, value type, default value, help text.
    parser.add_argument(
        '--image-root', type=str, default='', dest='root',
        help='The directory where the image data and the annotation is stored.'
             '保存图像与标记数据的根目录, 图像与其相应标记应当在同一级目录。')
    parser.add_argument(
        '--model-edge', type=str, default='', dest='model_edge',
        help='保存模型的目录edge')
    parser.add_argument(
        '--model-center', type=str, default='', dest='model_center',
        help='保存模型的目录center')
    parser.add_argument(
        '--output-root', type=str, default='', dest='output',
        help='the root for all the output.'
             '输出结果的根目录')
    return parser.parse_args(argv)


def is_overlap(rect1, rect2):
    """
    Return True if two axis-aligned rectangles strictly overlap.

    Rectangles are given as [axis1_min, axis2_min, axis1_max, axis2_max].
    Rectangles that merely touch along an edge do NOT count as
    overlapping (the comparisons are non-strict).

    :param rect1: first rectangle
    :param rect2: second rectangle
    :return: True when the interiors intersect, else False
    """
    # Two rectangles are disjoint exactly when one lies entirely on one
    # side of the other along either axis.
    separated = (
        rect1[0] >= rect2[2]
        or rect1[2] <= rect2[0]
        or rect1[1] >= rect2[3]
        or rect1[3] <= rect2[1]
    )
    return not separated


def get_overlap_area(rect1, rect2):
    """
    Return the area of the intersection of two rectangles; 0.0 when
    they do not strictly overlap.

    Rectangles are given as [axis1_min, axis2_min, axis1_max, axis2_max].

    :param rect1: first rectangle
    :param rect2: second rectangle
    :return: intersection area, or 0.0 if the rectangles are disjoint
    """
    # Same separation test as is_overlap, inlined here.
    disjoint = (
        rect1[0] >= rect2[2]
        or rect1[1] >= rect2[3]
        or rect1[2] <= rect2[0]
        or rect1[3] <= rect2[1]
    )
    if disjoint:
        return 0.0
    width = min(rect1[2], rect2[2]) - max(rect1[0], rect2[0])
    height = min(rect1[3], rect2[3]) - max(rect1[1], rect2[1])
    return width * height


def merge_boxes(all_boxes, all_classes, all_scores, overlap_percent):
    """
    Remove boxes that overlap an earlier box too heavily (in place).

    For each pair of boxes, the overlap area is divided by the smaller
    of the two box areas; when that ratio exceeds ``overlap_percent``
    the later box (and its class/score entries) is deleted.

    :param all_boxes: list of boxes [a1_min, a2_min, a1_max, a2_max];
                      mutated in place
    :param all_classes: class labels parallel to ``all_boxes``; mutated
    :param all_scores: scores parallel to ``all_boxes``; mutated
    :param overlap_percent: ratio threshold above which boxes are merged
    :return: None (the three lists are modified in place)
    """
    need_del_idx = []  # indices of boxes scheduled for deletion
    for idx, box in enumerate(all_boxes):
        if idx in need_del_idx:
            # A box already scheduled for deletion must not suppress others.
            continue
        for id_2, box_2 in enumerate(all_boxes[idx + 1:], idx + 1):
            if id_2 in need_del_idx:
                continue
            overlap_area = get_overlap_area(box, box_2)
            src_area = min((box[2] - box[0]) * (box[3] - box[1]),
                           (box_2[2] - box_2[0]) * (box_2[3] - box_2[1]))
            if src_area <= 0:
                # Degenerate (zero-area) box: previously this raised
                # ZeroDivisionError; treat as non-overlapping instead.
                continue
            if overlap_area / src_area > overlap_percent:
                need_del_idx.append(id_2)
    # Delete from the end so earlier indices stay valid.
    for i in sorted(need_del_idx, reverse=True):
        del all_boxes[i]
        del all_classes[i]
        del all_scores[i]


"""
# predict_image：函数功能，画出图片错标，漏标行人，黑色框为真实框，绿色为预测值，金色漏框，蓝色错框
#image_path:图片路径
#label_list：label_list=['person']
# checkpoint_center：中心模型的PB文件
# checkpoint_edge：边缘模型的PB文件
# score：置信度超过score才画出
框：[ymin, xmin, ymax, xmax]
"""


def predict_one_image(image, context: Context):
    """
    Run every detection model in *context* over one image and merge the
    results into a single DetectResult.

    Each graph in ``context.detection_graph_list`` is executed in its
    own TF session (serially); detections below ``context.confidence``
    are discarded and heavily overlapping boxes are merged per model.

    :param image: HxWxC image array (assumed RGB — TODO confirm; the
                  original BGR->RGB conversion is commented out below)
    :param context: project Context holding the model graphs, the label
                    file paths and the confidence threshold
    :return: DetectResult wrapping a copy of the input image and the
             detected boxes in pixel coordinates
    """
    final_boxes = []
    final_scores = []
    final_classes = []

    height, width, _ = image.shape
    output_img = image.copy()
    # img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    batched = np.expand_dims(image, axis=0)  # add batch dimension

    for graph, label_path in zip(context.detection_graph_list,
                                 context.label_class_path_list):
        label_map_dict = label_map_util.get_label_map_dict(label_path)
        category_index = get_label_from_pd_file(label_path, len(label_map_dict))
        label_list = get_label_list_from_category_index(category_index)

        config = tf.ConfigProto(device_count={"CPU": 4, "GPU": 1})
        config.gpu_options.allow_growth = True

        with graph.as_default():
            with tf.Session(graph=graph, config=config) as sess:
                _boxes, _classes, _scores = run_detection(sess, graph, batched)

        # Keep only detections whose confidence reaches the threshold.
        boxes, classes, scores = [], [], []
        for box, cls, sc in zip(_boxes, _classes, _scores):
            if sc >= context.confidence:
                boxes.append(box)
                # model classes are 1-based; label_list is 0-based
                classes.append(label_list[cls - 1])
                scores.append(sc)
        merge_boxes(boxes, classes, scores, overlap_percent=0.5)
        final_boxes += boxes
        final_classes += classes
        final_scores += scores

    dboxes = []
    for box, cls, sc in zip(final_boxes, final_classes, final_scores):
        # Boxes are normalized [ymin, xmin, ymax, xmax]; convert to
        # pixel-space corner points (clockwise from top-left).
        ymin, xmin, ymax, xmax = list(box)
        dboxes.append(DetectedBox([[xmin * width, ymin * height],
                                   [xmax * width, ymin * height],
                                   [xmax * width, ymax * height],
                                   [xmin * width, ymax * height]], sc, str(cls)))

    return DetectResult(output_img, dboxes)


def predict_image(image_list, context: Context):
    """
    Run detection on each image in *image_list*, one after another.

    :param image_list: sequence of images to process
    :param context: project Context shared by all detections
    :return: list of DetectResult objects, one per input image, in order
    """
    return [predict_one_image(img, context) for img in image_list]
