#!/usr/bin/env python 
# -*- coding: utf-8 -*-
# @Time    : 2019/2/19 9:35
# @Author  : Tang Yang
# @Desc    : 检测结果评估器
# @File    : detect_result_evaluator.py
import numpy as np
from openpyxl import Workbook
from utils.basic import is_overlap, get_overlap_area


def _eval_detect_result_signle_image(pred_boxes, pred_classes, pred_scores, gt_boxes, gt_classes, threshold):
    """
    根据gt_boxes，gt_classes，pred_boxes，pred_classes, pred_scores计算gt_list和pred_list
    :threshold: 计算过程中采用的最低置信度
    :return: None
    """
    # 过滤置信度低于阈值的检测结果
    boxes, classes, scores = [], [], []
    for box, cls, sc in zip(pred_boxes, pred_classes, pred_scores):
        if sc >= threshold:
            boxes.append(box)
            classes.append(cls)
            scores.append(scores)

    gt_list = []
    pred_list = []
    _index = []  # 保存检测结果的boundingbox中位置（不包括标签）能和gt一一对应上的那些boundingbox的索引
    for box, cls in zip(boxes, classes):
        overlap_rate = 0.0  # 保存检查过程中出现过的最大的重叠面积
        response_cls = -1  # 与box位置对应的gt_box的类别，默认为-1，表示没有对应的gt_box
        for idx, (t_box, t_cls) in enumerate(zip(gt_boxes, gt_classes)):
            if is_overlap(t_box, box):
                area = get_overlap_area(t_box, box)
                src_area = min((box[2] - box[0]) * (box[3] - box[1]),
                               (t_box[2] - t_box[0]) * (t_box[3] - t_box[1]))
                if src_area <= 0:
                    continue
                if (area / src_area) > 0.3:
                    if (area / src_area) > overlap_rate:
                        overlap_rate = area / src_area  # 更新最大重叠面积
                        response_cls = t_cls
                        _index.append(idx)
        gt_list.append(0 if response_cls == -1 else response_cls)
        pred_list.append(cls)

    for idx in range(len(gt_classes)):
        if idx not in _index:  # 没能找到一一对应的表示是错检
            gt_list.append(gt_classes[idx])
            pred_list.append(0)
    gt_list = list(map(int, gt_list))
    pred_list = list(map(int, pred_list))
    return gt_list, pred_list


class EvaluationMatrix:
    """Thin wrapper around a 2-D numpy matrix that can dump itself to Excel."""

    def __init__(self, data: np.ndarray):
        self._data = data

    def save_excel(self, path, label_map_dict):
        """
        Write the matrix to an Excel sheet with a label header row and column.

        Classes whose matrix row AND column are entirely zero are omitted.

        :param path: destination Excel file path
        :param label_map_dict: mapping of class name -> class id (inverted internally)
        :return: None
        """
        id_to_name = {value: key for key, value in label_map_dict.items()}
        book = Workbook()
        sheet = book.active
        # Class indices that appear in no gt row and no prediction column.
        skip = set(np.where(self._data.sum(axis=1) == 0)[0]) & \
               set(np.where(self._data.sum(axis=0) == 0)[0])
        row = 1
        # Index -1 denotes the header row/column of label names.
        for m_row in range(-1, self._data.shape[0]):
            if m_row in skip:
                continue
            col = 1
            wrote_any = False
            for m_col in range(-1, self._data.shape[1]):
                if m_col in skip:
                    continue
                if m_row == -1 and m_col == -1:
                    cell_value = "Label"
                elif m_row == -1:
                    cell_value = id_to_name[m_col + 1]
                elif m_col == -1:
                    cell_value = id_to_name[m_row + 1]
                else:
                    cell_value = int(self._data[m_row][m_col])
                sheet.cell(row=row, column=col, value=cell_value)
                col += 1
                wrote_any = True
            if wrote_any:
                row += 1
        book.save(path)

    @property
    def data(self):
        return self._data


class DetectResultEvaluator:
    """
    Detection-result evaluator: computes precision, recall, confusion matrix,
    missing-detection matrix and a VOC-style AP over a set of images.
    """

    def __init__(self, gt_boxes: list, gt_classes: list, pred_boxes: list, pred_classes: list, pred_scores: list):
        """
        :param gt_boxes: ground-truth boxes of all test images, e.g.
                         [[[ymin, xmin, ymax, xmax], ...], ...]; one inner list per image
        :param gt_classes: ground-truth labels per image, aligned with gt_boxes
        :param pred_boxes: detected boxes per image, same layout as gt_boxes
        :param pred_classes: detected labels per image, same layout as gt_classes
        :param pred_scores: detection confidence per box, e.g. [[sc_1, sc_2, ...], ...],
                            aligned with pred_boxes and pred_classes
        """
        self._gt_boxes = gt_boxes
        self._gt_classes = gt_classes
        self._pred_boxes = pred_boxes
        self._pred_classes = pred_classes
        self._pred_scores = pred_scores
        # gt_list / pred_list are flat, position-aligned label sequences: a
        # detection matched (by position) to a gt box contributes one pair.
        # E.g. gt_list = [1, 2, 2], pred_list = [1, 0, 1] means one missed
        # detection (index 1) and one wrong detection (index 2).
        self._gt_list = []
        self._pred_list = []
        # Cached confusion matrix, rebuilt by _init_data.
        # Consistency fix: always an EvaluationMatrix (was a bare ndarray here
        # and a bare list in reset_data).
        self._confusion_matrix = EvaluationMatrix(np.array([[]]))

    def _check_shape(self):
        """Validate the nesting depth of the input data (diagnostic helper).

        Bug fix: the original compared len(np.array(...)) — the first-dimension
        size — against 3; the intent is clearly a dimensionality (ndim) check.
        NOTE(review): ragged per-image lists produce object arrays (ndim 1), so
        this check only passes for uniform box counts — confirm intended use.
        """
        if np.array(self._gt_classes).ndim != 2:
            raise ValueError("Shape Error with gt_classes")
        if np.array(self._gt_boxes).ndim != 3 or np.array(self._gt_boxes).shape[2] != 4:
            raise ValueError("Shape Error with gt_boxes")
        if np.array(self._pred_classes).ndim != 2:
            raise ValueError("Shape Error with pred_classes")
        if np.array(self._pred_scores).ndim != 2:
            raise ValueError("Shape Error with pred_scores")
        # Bug fix: the error message said "gt_classes" for the pred_boxes check.
        if np.array(self._pred_boxes).ndim != 3 or np.array(self._pred_boxes).shape[2] != 4:
            raise ValueError("Shape Error with pred_boxes")

    def _eval_detect_result(self, threshold):
        """Rebuild the flat gt/pred label sequences over all images."""
        self._gt_list = []
        self._pred_list = []
        for pred_boxes, pred_classes, pred_scores, gt_boxes, gt_classes in zip(
                self._pred_boxes, self._pred_classes, self._pred_scores,
                self._gt_boxes, self._gt_classes):
            gt_list, pred_list = _eval_detect_result_signle_image(
                pred_boxes, pred_classes, pred_scores, gt_boxes, gt_classes, threshold)
            self._gt_list.extend(gt_list)
            self._pred_list.extend(pred_list)

    def _compute_pec_rec(self, threshold):
        """
        Compute precision and recall at the given confidence threshold.

        precision = correctly classified matches / detections kept
        recall    = correctly classified matches / ground-truth boxes
        """
        self._init_data(threshold)
        if not self._gt_list:
            # No data at all: the confusion matrix is empty and has no trace.
            return 0.0, 0.0
        correct = np.trace(self._confusion_matrix.data)
        num_pred = sum(1 for p in self._pred_list if p != 0)
        prec = 0.0 if num_pred == 0 else correct / num_pred
        num_gt = sum(1 for g in self._gt_list if g != 0)
        # Bug fix: recall was divided by len(self._pred_list) (total aligned
        # pairs) instead of the number of ground-truth entries.
        rec = 0.0 if num_gt == 0 else correct / num_gt
        return prec, rec

    def _compute_confusion_matrix(self):
        """
        Build the confusion matrix over matched pairs (gt != 0 and pred != 0).
        Class id k maps to matrix row/column k - 1.
        :return: None (result stored in self._confusion_matrix)
        """
        assert len(self._gt_list) == len(self._pred_list), "gt和pred长度必须一致"
        if len(self._gt_list) == 0:
            # Bug fix: the original *returned* the empty matrix instead of
            # storing it, leaving self._confusion_matrix stale.
            self._confusion_matrix = EvaluationMatrix(np.array([]))
            return
        len_of_labels = int(max(max(self._gt_list), max(self._pred_list)))
        confusion_matrix = np.zeros((len_of_labels, len_of_labels), dtype=np.uint64)
        for gt, pred in zip(self._gt_list, self._pred_list):
            if gt != 0 and pred != 0:
                confusion_matrix[int(gt - 1)][int(pred - 1)] += 1
        self._confusion_matrix = EvaluationMatrix(confusion_matrix)

    def _init_data(self, threshold):
        """
        Recompute gt_list, pred_list and the confusion matrix.
        :param threshold: minimum confidence applied to detections
        :return: None
        """
        self._eval_detect_result(threshold)
        self._compute_confusion_matrix()

    def reset_data(self, gt_boxes, gt_classes, pred_boxes, pred_classes, pred_scores):
        """
        Replace the detection and annotation data and clear cached results.
        """
        self._gt_boxes = gt_boxes
        self._gt_classes = gt_classes
        self._pred_boxes = pred_boxes
        self._pred_classes = pred_classes
        self._pred_scores = pred_scores
        self._gt_list = []
        self._pred_list = []
        # Consistency fix: keep the same type as __init__ (was a bare list).
        self._confusion_matrix = EvaluationMatrix(np.array([[]]))

    def confusion_matrix(self, threshold):
        """
        Return the confusion matrix.
        :param threshold: minimum confidence applied to detections
        :return: EvaluationMatrix
        """
        self._init_data(threshold)
        return self._confusion_matrix

    def precision_and_recall(self, threshold):
        """
        Compute precision and recall.
        :param threshold: minimum confidence applied to detections
        :return: (precision, recall) tuple
        """
        return self._compute_pec_rec(threshold)

    def voc_ap(self):
        """
        Compute a VOC-style 11-point interpolated AP by sweeping the confidence
        threshold from 0.0 to 1.0 in steps of 0.1.
        """
        ap = 0.
        for t in np.arange(0., 1.1, 0.1):
            prec, rec = self._compute_pec_rec(t)
            # Bug fix: prec and rec are scalars, so the original array-style
            # prec[rec >= t] raised TypeError whenever prec != 0.  With a
            # single (prec, rec) point per threshold the 11-point rule
            # degenerates to "take prec when rec clears the threshold".
            p = prec if rec >= t else 0.0
            ap = ap + p / 11.
        return ap

    def missing_matrix(self, threshold):
        """
        Compute the missed-detection counts per class; row k counts misses of
        class id k + 1 (index 0 is the first real class).
        :param threshold: minimum confidence applied to detections
        :return: EvaluationMatrix column vector of miss counts
        """
        self._eval_detect_result(threshold)
        if len(self._gt_list) == 0:
            return EvaluationMatrix(np.array([]))
        res_matrix = np.zeros((max(self._gt_list), 1), dtype=np.int64)
        for gt, pred in zip(self._gt_list, self._pred_list):
            # Guard gt > 0: a gt of 0 would otherwise index row -1.
            if pred == 0 and gt > 0:
                res_matrix[gt - 1][0] += 1
        return EvaluationMatrix(res_matrix)


def test():
    """Smoke test: evaluate a tiny hand-crafted sample and dump the results."""
    gt_boxes = [[[10, 10, 40, 30], [15, 40, 50, 50], [60, 15, 80, 30],
                 [60, 45, 80, 55]]]  # [10, 10, 40, 30], [15, 40, 50, 50], [60, 15, 80, 30], [60, 45, 80, 55]
    gt_classes = [[1, 2, 5, 4]]  # 1, 2, 2, 3
    pred_boxes = [[[10, 13, 40, 30], [15, 40, 50, 50], [60, 45, 80, 55]]]
    pred_classes = [[1, 2, 5]]
    pred_scores = [[0.7, 0.9, 0.5]]
    threshold = 0.5
    label_map = {1: "nccn", 2: "ncor", 3: "gerber", 4: "ncon", 5: "nestle"}

    evaluator = DetectResultEvaluator(gt_boxes, gt_classes, pred_boxes, pred_classes, pred_scores)

    # Confusion matrix -> Excel.
    confusion = evaluator.confusion_matrix(threshold)
    confusion.save_excel("confusion_matrix.xlsx", label_map)

    # Missing-detection matrix -> Excel.
    missing = evaluator.missing_matrix(threshold)
    missing.save_excel("missing_matrix.xlsx", label_map)

    print("Precision and Recall: ", evaluator.precision_and_recall(threshold))
    print("Missing matrix: \n", missing.data)
    print("mAP: ", evaluator.voc_ap())


# Run the smoke test when this module is executed directly.
if __name__ == "__main__":
    test()