#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: chenjun
@contact: gangkanli1219@gmail.com
@time: 1/3/18 5:18 PM
@desc: trying to speed up predicting
"""
from __future__ import division
import os

os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import time
from object_detection.utils import label_map_util
import cv2
import numpy as np
import tensorflow as tf
from object_detection.utils import visualization_utils as vis_utils
from utils.basic import get_file_name
from utils.detector import load_model
from utils.detector import run_detection
from utils.evaluator import eval_detect_result
from utils.evaluator import read_xml_as_eval_info
from utils.io import get_label_from_pd_file
from utils.io import get_label_list_from_category_index

from result_analysis import ResultAnalyzer


def is_overlap(rect1, rect2):
    """
    Check whether two axis-aligned rectangles share a common interior.

    Parameters
    ----------
    rect1: rectangle as [axis1_min, axis2_min, axis1_max, axis2_max]
    rect2: rectangle as [axis1_min, axis2_min, axis1_max, axis2_max]

    Returns
    -------
    True when the rectangles strictly overlap; rectangles that merely
    touch along an edge are NOT considered overlapping.
    """
    # De Morgan form of "no separating axis exists": the rectangles
    # overlap iff, on both axes, each one starts before the other ends.
    return (rect1[0] < rect2[2] and
            rect2[0] < rect1[2] and
            rect1[1] < rect2[3] and
            rect2[1] < rect1[3])


def get_overlap_area(rect1, rect2):
    """
    Compute the area of the intersection of two rectangles.

    Parameters
    ----------
    rect1: rectangle as [axis1_min, axis2_min, axis1_max, axis2_max]
    rect2: rectangle as [axis1_min, axis2_min, axis1_max, axis2_max]

    Returns
    -------
    The intersection area, or 0.0 when the rectangles do not strictly
    overlap (touching edges count as no overlap).
    """
    # Intersection corners: the later of the two minima, the earlier of
    # the two maxima on each axis.
    inter_w = min(rect1[2], rect2[2]) - max(rect1[0], rect2[0])
    inter_h = min(rect1[3], rect2[3]) - max(rect1[1], rect2[1])
    if inter_w <= 0 or inter_h <= 0:
        return 0.0
    return inter_w * inter_h


def merge_boxes(all_boxes, all_classes, all_scores, overlap_percent):
    """
    Merge boxes that overlap too heavily, keeping the earlier one.

    For every box pair, when the intersection area divided by the smaller
    box's area exceeds ``overlap_percent`` the later box is dropped.
    When either member of a pair has zero area, the later box is dropped
    outright. All three lists are modified in place.

    BUG FIX: the original appended duplicate indices to the deletion list
    in the zero-area branch (no membership check), which made the
    delete-by-index loop remove wrong entries or raise IndexError (the
    old "Error" print was a symptom). A set guarantees each index is
    removed exactly once.

    :param all_boxes: boxes as [axis1_min, axis2_min, axis1_max, axis2_max]
    :param all_classes: class ids, parallel to ``all_boxes``
    :param all_scores: confidences, parallel to ``all_boxes``
    :param overlap_percent: overlap-ratio threshold above which the later
        box of a pair is dropped
    :return: None — the lists are edited in place
    """
    need_del = set()
    for idx, box in enumerate(all_boxes):
        if idx in need_del:
            continue
        area = (box[2] - box[0]) * (box[3] - box[1])
        for id_2 in range(idx + 1, len(all_boxes)):
            box_2 = all_boxes[id_2]
            area_2 = (box_2[2] - box_2[0]) * (box_2[3] - box_2[1])
            src_area = min(area, area_2)
            if src_area == 0:
                # Degenerate pair member: drop the later box.
                need_del.add(id_2)
                continue
            # Intersection area (0 when the boxes do not strictly overlap).
            inter_w = min(box[2], box_2[2]) - max(box[0], box_2[0])
            inter_h = min(box[3], box_2[3]) - max(box[1], box_2[1])
            overlap_area = inter_w * inter_h if inter_w > 0 and inter_h > 0 else 0.0
            if overlap_area / src_area > overlap_percent:
                need_del.add(id_2)

    # Delete from the highest index down so lower indices stay valid.
    for i in sorted(need_del, reverse=True):
        del all_boxes[i]
        del all_classes[i]
        del all_scores[i]

    return None


def compute_recall_and_precision(ground_true, predictions):
    """

    :param ground_true: 真实值
    :param predictions: 预测值
    :return: 查准率和召回率
    """
    tp = 0
    tr = 0
    fa = 0
    for idx in range(len(ground_true)):
        if ground_true[idx] == predictions[idx]:
            tp = tp + 1
    for idx in range(len(ground_true)):
        if predictions[idx] != 0:
            tr = tr + 1
        if ground_true[idx] != 0:
            fa = fa + 1

    Precision = float(tp / tr)
    Recall = float(tp / fa)
    return Precision, Recall


def fileter_boxes(boxes, classes, scores, img_shape, threshold=0.4):
    """
    Filter out low-confidence boxes that are not supported by the layout
    of the confident detections.

    A binary mask is painted from every confident box
    (score >= ``threshold``); a low-confidence box is kept only when both
    the region immediately to its left and the region immediately to its
    right are sufficiently (>= 30%) covered by that mask. Zero-area boxes
    are always dropped. All three lists are modified in place.

    BUG FIX: the original computed the pixel width as
    ``box[3] * width - box[1] * height``, mixing the two image axes;
    both terms must be scaled by ``width``.

    :param boxes: normalized boxes [ymin, xmin, ymax, xmax]
    :param classes: class ids, parallel to ``boxes``
    :param scores: confidences, parallel to ``boxes``
    :param img_shape: (height, width, channels) of the source image
    :param threshold: confidence at/above which a box is trusted as-is
    :return: None — the lists are filtered in place
    """
    height, width, _ = img_shape
    # Mask of every confident detection.
    mask = np.zeros((height, width), dtype=np.uint8)
    for idx, box in enumerate(boxes):
        if scores[idx] >= threshold:
            mask[int(box[0] * height):int(box[2] * height), int(box[1] * width):int(box[3] * width)] = 255

    need_del_idx = []
    for idx, box in enumerate(boxes):
        if scores[idx] >= threshold:
            continue
        box_width = int(box[3] * width - box[1] * width)
        box_height = int(box[2] * height - box[0] * height)
        x_min = int(box[1] * width)   # top-left corner, x
        y_min = int(box[0] * height)  # top-left corner, y
        box_area = box_width * box_height
        if box_area == 0:
            # Degenerate box — drop it outright.
            need_del_idx.append(idx)
            continue
        # Mask coverage of the equally-sized regions directly left and
        # right of the box. NOTE(review): when x_min < box_width the left
        # slice start goes negative and Python wraps it to the other side
        # of the mask — preserved from the original; confirm if intended.
        left_sum = np.sum(mask[y_min:y_min + box_height, x_min - box_width:x_min])
        right_sum = np.sum(mask[y_min:y_min + box_height, x_min + box_width:x_min + 2 * box_width])
        full_coverage = 255 * box_area
        if left_sum / full_coverage < 0.3 or right_sum / full_coverage < 0.3:
            need_del_idx.append(idx)

    # Delete from the highest index down so lower indices stay valid;
    # indices are appended at most once, so no bounds re-check is needed.
    for i in sorted(need_del_idx, reverse=True):
        del boxes[i]
        del classes[i]
        del scores[i]


def predict_image(image_path, label_map_path, checkpoint, image_result_save_dir,
                  score=0.4, percent=0.3, work_book_name='result.xlsx'):
    """
    Run object detection on every .png/.jpg under ``image_path`` and
    evaluate the predictions against the .xml annotation stored next to
    each image.

    Annotated output images and an xlsx result workbook are written to
    ``image_result_save_dir``.

    Parameters
    ----------
    image_path: directory containing the images and their .xml annotations
    label_map_path: path to the label-map .pbtxt file
    checkpoint: path to the frozen inference graph (.pb) to load
    image_result_save_dir: output directory ('' disables image saving)
    score: confidence threshold — NOTE(review): only referenced by the
        commented-out ``fileter_boxes`` call; the in-line filter below
        uses a hard-coded 0.4
    percent: overlap ratio passed to ``merge_boxes``
    work_book_name: file name of the xlsx workbook

    Returns
    -------
    (precision, recall) computed over all processed images
    """
    # Initialise the detection-result analyzer (collects per-image results
    # into the xlsx workbook).
    result_analyzer = ResultAnalyzer(os.path.join(image_result_save_dir, work_book_name), "class_nums.json")

    start_time = time.time()
    detection_graph = load_model(checkpoint)
    label_map_dict = label_map_util.get_label_map_dict(label_map_path)
    category_index = get_label_from_pd_file(label_map_path, len(label_map_dict))
    label_list = get_label_list_from_category_index(category_index)
    # Reverse mapping: class id -> label name.
    label_map_dict_new = {value: key for key, value in label_map_dict.items()}

    # Session configuration (the GPU is disabled globally at module import
    # via CUDA_VISIBLE_DEVICES="-1").
    config = tf.ConfigProto(device_count={"CPU": 4, "GPU": 1})
    config.gpu_options.allow_growth = True
    stage_time = time.time()
    ground_true = []
    predictions = []
    # all_pic_recall = []
    # all_pic_pre = []

    print('loading models completed!,time is', stage_time - start_time)
    count = 0
    start_time = time.time()
    # Iterate over every image file in the folder.
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph, config=config) as sess:
            for idx, image_name in enumerate(os.listdir(image_path)):
                if image_name.split('.')[-1] == 'png' or image_name.split('.')[-1] == 'jpg':
                    print('predicting image:', (os.path.join(image_path, image_name)))
                    # Image name without its extension.
                    img_head_name = get_file_name(image_name)
                    count += 1
                    print("正在预测第 " + str(count) + "张图片")
                    img = cv2.imread(os.path.join(image_path, image_name))
                    # original_shape = img.shape
                    output_img = img.copy()
                    # OpenCV loads images as BGR; the model expects RGB.
                    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                    xml_path = os.path.join(image_path, img_head_name + '.xml')

                    img = np.expand_dims(img, axis=0)
                    _boxes, _classes, _scores = run_detection(sess, detection_graph, img)
                    boxes, classes, scores = [], [], []
                    # Keep only detections with confidence >= 0.4.
                    # NOTE(review): the hard-coded 0.4 ignores the
                    # ``score`` parameter — confirm whether intended.
                    for box, cls, sc in zip(_boxes, _classes, _scores):
                        if 0.1 < sc < 0.4:
                            print("Ops, one hit.")
                        if 0.4 <= sc:
                            boxes.append(box)
                            classes.append(cls)
                            scores.append(sc)
                    if len(scores) != 0:
                        print("min scores: ", min(scores))
                    print("Len before merge: ", len(boxes))
                    merge_boxes(boxes, classes, scores, percent)
                    print(classes)

                    print("Len after merge(before filter): ", len(boxes))
                    # fileter_boxes(boxes, classes, scores, img.shape[1:], score)
                    # print("Len after filter: ", len(boxes))

                    objects = read_xml_as_eval_info(xml_path, label_list)['objects']
                    # Split the annotation rows into class column and box columns.
                    gt_classes, gt_boxes = np.hsplit(np.array(objects), [1])

                    if len(objects) == 0:
                        gt_boxes = []
                        gt_classes = []
                    else:
                        gt_classes = list(gt_classes.flatten())

                        # Reorder [xmin, ymin, xmax, ymax] -> [ymin, xmin, ymax, xmax].
                        gt_boxes = gt_boxes[:, (1, 0, 3, 2)]
                        gt_boxes = list(map(lambda x: list(x), gt_boxes))

                    # Draw the predicted boxes on the output image.
                    # NOTE(review): ``label_map_dict`` is passed where a
                    # category index is normally expected; labels are
                    # skipped anyway (skip_labels=True), so only boxes are
                    # drawn — confirm against the vis_utils signature.
                    vis_utils.visualize_boxes_and_labels_on_image_array(
                        output_img, np.array(boxes), np.ones((len(classes),), dtype=np.int32), scores,
                        label_map_dict,
                        use_normalized_coordinates=True,
                        max_boxes_to_draw=200,
                        skip_labels=True,
                        min_score_thresh=0.0,
                        line_thickness=3)
                    if image_result_save_dir != '':
                        cv2.imwrite(os.path.join(image_result_save_dir, image_name), output_img)
                    gt_classes = list(map(lambda x: x + 1, gt_classes))
                    gt, pred = eval_detect_result(gt_boxes, gt_classes, boxes, classes, default_class=0)
                    # Analyse this image's detections and write them to the sheet.
                    result_analyzer.analysis_and_write_single_image_result_2_sheet(image_name, "caffe",
                                                                                   label_map_dict_new, gt_boxes,
                                                                                   gt_classes, boxes, classes)

                    ground_true += gt
                    predictions += pred

    end_time = time.time()
    print("total time: ", end_time - start_time)
    # NOTE(review): 54.0 looks like a hard-coded image count — confirm;
    # ``count`` would be the actual number of processed images.
    print("average time: ", (end_time - start_time) / 54.0)
    precision, recall = compute_recall_and_precision(ground_true, predictions)

    # Build the confusion matrix and write it to the workbook.
    result_analyzer.get_and_write_confusion_matrix(label_map_dict_new)

    print('Total Recall: ', recall)
    print('Total Precision: ', precision)

    # Persist the workbook to disk.
    result_analyzer.save()

    return precision, recall


if __name__ == '__main__':
    # Run the full prediction/evaluation pass once per frozen model file
    # found in the models directory.
    pb_directory = r"C:\Users\ty\Desktop\models"
    for pb_name in os.listdir(pb_directory):
        predict_image(image_path=r"C:\Users\ty\Desktop\004",
                      label_map_path=r'C:\Users\ty\Desktop\pbtxt\label_map_ncor_ncon.pbtxt',
                      checkpoint=os.path.join(pb_directory, pb_name),
                      score=0.4,
                      percent=0.3,
                      image_result_save_dir=r'C:\Users\ty\Desktop\004_ret')
