# -*- coding: utf-8 -*-
# ================================================================
#
#   Editor      : PyCharm
#   File name   : mAP.py
#   Author      : CGump
#   Email       : huangzhigang93@qq.com
#   Created date: 2021/3/26 20:08
#
# ================================================================
import glob
import json
import os
import shutil
import operator
import sys
import math
import cv2
import matplotlib.pyplot as plt
import numpy as np

# 两个目标框重叠的交并比阈值，用于判断两个目标框是否框选相同的区域
MINOVERLAP = 0.45
# 两个目标框重叠的最小交并比阈值，用于判断小框与大框的重叠情况
MININTERSECT = 0.005
# 交占比阈值，S(DR ∩ GT)/S(DR)，用于判断小框与大框的重叠情况
MINCOVER = 0.7
# 验证集标注框存放文件夹
GT_PATH = os.path.join(os.getcwd(), 'input', 'ground-truth')
# 检测结果目标框存放文件夹
DR_PATH = os.path.join(os.getcwd(), 'input', 'detection-results')
# 验证集图片的存放文件夹
IMG_PATH = os.path.join(os.getcwd(), 'input', 'images-optional')
# 中转文件存放文件夹
TEMP_FILES_PATH = ".temp_files"
# 结果存放文件夹
RESULTS_FILES_PATH = "results"
# 每类目标的召回率-精确率曲线存放文件夹
REC_PREC_PATH = os.path.join(RESULTS_FILES_PATH, "classes")
# 每个框核对的结果图像存放文件夹
CHECK_PER_IMAGE_PATH = os.path.join(RESULTS_FILES_PATH, "images", "detections_one_by_one")
# 统计验证集中每个类别的标注框数量
gt_counter_per_class = {}
# 统计所有图像中，每种类别的分布，即出现某种类别的图像数量有多少
counter_images_per_class = {}

aim_class = "whitecell"
"""
DR: detection result, GT: ground truth
    0,0 ------> x (width)
     |
     |  (Left,Top)
     |      *_________
     |      |         |
            |         |
     y      |_________|
  (height)            *
                (Right,Bottom)
"""


def log_average_miss_rate(prec, fp_cumsum, num_images):
    """
    Compute the log-average miss rate (LAMR).

    LAMR is the geometric mean of the miss rate sampled at 9 FPPI
    (false positives per image) reference points evenly spaced in
    log-space between 1e-2 and 1e0.

    output:
            lamr | log-average miss rate
            mr | miss rate
            fppi | false positives per image

    references:
        [1] Dollar, Piotr, et al. "Pedestrian Detection: An Evaluation of the
           State of the Art." Pattern Analysis and Machine Intelligence, IEEE
           Transactions on 34.4 (2012): 743 - 761.
    """
    # No detections of this class at all: degenerate result.
    if prec.size == 0:
        return 0, 1, 0

    fppi = fp_cumsum / float(num_images)
    mr = 1 - prec

    # Prepend sentinels so every reference point finds at least one index:
    # min(ref) = 0.01 while the sentinel fppi value is -1.0.
    padded_fppi = np.insert(fppi, 0, -1.0)
    padded_mr = np.insert(mr, 0, 1.0)

    # Sample the miss rate at 9 log-spaced FPPI reference points.
    ref = np.logspace(-2.0, 0.0, num=9)
    for idx in range(len(ref)):
        pos = np.where(padded_fppi <= ref[idx])[-1][-1]
        ref[idx] = padded_mr[pos]

    # Geometric mean; clamp at 1e-10 because log(0) is undefined.
    lamr = math.exp(np.mean(np.log(np.maximum(1e-10, ref))))

    return lamr, mr, fppi


def error(msg):
    """Report a fatal problem and terminate the process.

    :param msg: human-readable error description, printed to stdout
    """
    print(msg)
    sys.exit(0)


def is_float_between_0_and_1(value):
    """Return True if *value* parses as a float strictly inside (0.0, 1.0)."""
    try:
        return 0.0 < float(value) < 1.0
    except ValueError:
        # Not parseable as a float at all.
        return False


def file_lines_to_list(path):
    """Read a text file and return its lines as a list, whitespace-stripped."""
    with open(path) as handle:
        # str.strip() removes the trailing newline plus surrounding blanks.
        return [line.strip() for line in handle]


def draw_text_in_image(img_input, text_content, position, t_color, l_width):
    """Render *text_content* onto *img_input* at *position*.

    Returns the image together with the running line width, i.e. *l_width*
    plus the pixel width of the rendered text, so successive calls can
    append more text on the same line.
    """
    font_face = cv2.FONT_HERSHEY_PLAIN
    scale = 1
    thickness = 1
    cv2.putText(img_input,
                text_content,
                position,
                font_face,
                scale,
                t_color,
                thickness)
    # getTextSize returns ((width, height), baseline); we only need the width.
    (text_width, _), _ = cv2.getTextSize(text_content, font_face, scale, thickness)
    return img_input, l_width + text_width


def adjust_axes(r, t, fig, axes):
    """Stretch the x-axis so the text artist *t* fits inside the figure.

    :param r: matplotlib renderer
    :param t: text artist that may overflow the right edge
    :param fig: current figure
    :param axes: axes whose upper x-limit gets scaled up
    """
    # Width of the rendered text, converted from pixels to inches.
    bbox = t.get_window_extent(renderer=r)
    text_width_inches = bbox.width / fig.dpi
    # Scale the x-limit by the proportion the figure would need to grow.
    current_width = fig.get_figwidth()
    proportion = (current_width + text_width_inches) / current_width
    x_lim = axes.get_xlim()
    axes.set_xlim([x_lim[0], x_lim[1] * proportion])


def draw_plot_func(dictionary, n_classes, window_title, plot_title, x_label, output_path, to_show, plot_color,
                   true_p_bar):
    """
    Draw a horizontal bar chart of per-class statistics and save it.

    :param dictionary: {class_name: value} data to plot
    :param n_classes: number of bars (entries in ``dictionary``)
    :param window_title: figure window title
    :param plot_title: title shown above the plot
    :param x_label: x-axis label
    :param output_path: file path the figure is saved to
    :param to_show: when True, also display the figure interactively
    :param plot_color: bar color used in the single-color case
    :param true_p_bar: "" for plain bars, or {class_name: TP count} to draw
        stacked TP (green) / FP (red) bars
    """
    # sort the dictionary by decreasing value, into a list of tuples
    sorted_dic_by_value = sorted(dictionary.items(), key=operator.itemgetter(1))
    # unpacking the list of tuples into two lists
    sorted_keys, sorted_values = zip(*sorted_dic_by_value)
    #
    if true_p_bar != "":
        """
         Special case to draw in:
            - green -> TP: True Positives (object detected and matches ground-truth)
            - red -> FP: False Positives (object detected but does not match ground-truth)
            - orange -> FN: False Negatives (object not detected but present in the ground-truth)
        """
        fp_sorted = []
        tp_sorted = []
        for key in sorted_keys:
            fp_sorted.append(dictionary[key] - true_p_bar[key])
            tp_sorted.append(true_p_bar[key])
        plt.barh(range(n_classes), fp_sorted, align='center', color='crimson', label='False Positive')
        plt.barh(range(n_classes), tp_sorted, align='center', color='forestgreen', label='True Positive',
                 left=fp_sorted)
        # add legend
        plt.legend(loc='lower right')
        """
         Write number on side of bar
        """
        fig = plt.gcf()  # gcf - get current figure
        axes = plt.gca()
        r = fig.canvas.get_renderer()
        for i, val in enumerate(sorted_values):
            fp_val = fp_sorted[i]
            tp_val = tp_sorted[i]
            fp_str_val = " " + str(fp_val)
            tp_str_val = fp_str_val + " " + str(tp_val)
            # trick to paint multicolor with offset:
            # first paint everything and then repaint the first number
            t = plt.text(val, i, tp_str_val, color='forestgreen', va='center', fontweight='bold')
            plt.text(val, i, fp_str_val, color='crimson', va='center', fontweight='bold')
            if i == (len(sorted_values) - 1):  # largest bar
                adjust_axes(r, t, fig, axes)
    else:
        plt.barh(range(n_classes), sorted_values, color=plot_color)
        """
         Write number on side of bar
        """
        fig = plt.gcf()  # gcf - get current figure
        axes = plt.gca()
        r = fig.canvas.get_renderer()
        for i, val in enumerate(sorted_values):
            str_val = " " + str(val)  # add a space before
            if val < 1.0:
                str_val = " {0:.2f}".format(val)
            t = plt.text(val, i, str_val, color=plot_color, va='center', fontweight='bold')
            # re-set axes to show number inside the figure
            if i == (len(sorted_values) - 1):  # largest bar
                adjust_axes(r, t, fig, axes)
    # set window title
    fig.canvas.manager.set_window_title(window_title)
    # write classes in y axis
    tick_font_size = 12
    plt.yticks(range(n_classes), sorted_keys, fontsize=tick_font_size)
    """
     Re-scale height accordingly
    """
    init_height = fig.get_figheight()
    # compute the matrix height in points and inches
    dpi = fig.dpi
    height_pt = n_classes * (tick_font_size * 1.4)  # 1.4 (some spacing)
    height_in = height_pt / dpi
    # compute the required figure height
    top_margin = 0.15  # in percentage of the figure height
    bottom_margin = 0.05  # in percentage of the figure height
    figure_height = height_in / (1 - top_margin - bottom_margin)
    # set new height
    if figure_height > init_height:
        fig.set_figheight(figure_height)

    # set plot title
    plt.title(plot_title, fontsize=14)
    # set axis titles
    # plt.xlabel('classes')
    plt.xlabel(x_label, fontsize='large')
    # adjust size of window
    fig.tight_layout()
    # save the plot
    fig.savefig(output_path)
    # show image
    if to_show:
        plt.show()
    # close the plot
    plt.close()


def make_dirs(dir_path):
    """Create *dir_path* as a fresh, empty directory.

    Any existing directory (and everything inside it) at that path is
    removed first.
    """
    if os.path.exists(dir_path):
        shutil.rmtree(dir_path)
    os.makedirs(dir_path)


def GT_to_json(gt_path, dr_path, temp_file_path, cls_ignore=None):
    """
    Convert every ground-truth .txt file into a same-named JSON file of
    bounding-box dicts stored in *temp_file_path*.

    JSON format:
    [{"class_name": "simple_crack", "bbox": "1 2448 4096 2991", "used": false}, ...]

    Side effects: updates the module-level counters
    ``gt_counter_per_class`` (only for ``aim_class``) and
    ``counter_images_per_class``.

    :param gt_path: directory holding the ground-truth .txt annotation files
    :param dr_path: directory holding the detection-result .txt files; every
        GT file must have a matching DR file or the program aborts
    :param temp_file_path: output directory for the *_ground_truth.json files
    :param cls_ignore: optional list of class names to skip entirely
    :return: sorted list of the ground-truth .txt file paths
    """
    # Collect all ground-truth .txt files.
    gt_list = glob.glob(gt_path + '/*.txt')
    if len(gt_list) == 0:
        error("Error: No ground-truth files found!")
    gt_list.sort()  # sort by file name
    if not cls_ignore:
        cls_ignore = []
    for txt_file in gt_list:
        file_id = txt_file.split(".txt", 1)[0]
        file_id = os.path.basename(os.path.normpath(file_id))  # normalize, keep bare file id

        # Require a matching detection-result file so GT and DR line up 1:1.
        temp_path = os.path.join(dr_path, (file_id + ".txt"))
        if not os.path.exists(temp_path):
            error_msg = "Error. File not found: {}\n".format(temp_path)
            error(error_msg)

        # Read all annotation lines of this GT file.
        lines_list = file_lines_to_list(txt_file)
        bounding_boxes = []  # class / L,T,R,B dict of every GT box in this file
        already_seen_classes = []  # classes already counted for this image
        for line in lines_list:
            # BUG FIX: decide "difficult" per line. Previously the flag was
            # only reset after a *non-ignored* line was processed, so a
            # "difficult" line of an ignored class leaked the flag onto the
            # next ordinary line, wrongly marking it difficult.
            is_difficult = "difficult" in line
            if is_difficult:
                class_name, left, top, right, bottom, _ = line.split()
            else:
                class_name, left, top, right, bottom = line.split()

            # Skip classes the caller asked to ignore.
            if class_name in cls_ignore:
                continue

            bbox = left + " " + top + " " + right + " " + bottom
            if is_difficult:
                bounding_boxes.append({"class_name": class_name,
                                       "bbox": bbox,
                                       "used": False,
                                       "occupied": False,
                                       "difficult": True})
            else:
                bounding_boxes.append({"class_name": class_name,
                                       "bbox": bbox,
                                       "used": False,
                                       "occupied": False})

                # Count per-class GT boxes ("difficult" boxes are excluded).
                # todo currently restricted to a single class
                if class_name == aim_class:
                    if class_name in gt_counter_per_class:
                        gt_counter_per_class[class_name] += 1
                    else:
                        # first box of this class: create the counter
                        gt_counter_per_class[class_name] = 1

                # Count, per class, how many images contain that class at
                # least once (e.g. one image with apple+pear and one with two
                # apples gives apples: 2, pears: 1).
                if class_name not in already_seen_classes:
                    if class_name in counter_images_per_class:
                        counter_images_per_class[class_name] += 1
                    else:
                        # first image containing this class: create the counter
                        counter_images_per_class[class_name] = 1
                    already_seen_classes.append(class_name)

        # Persist this file's boxes as <file_id>_ground_truth.json.
        with open(temp_file_path + "/" + file_id + "_ground_truth.json", 'w') as outfile:
            json.dump(bounding_boxes, outfile)

    return gt_list


def DR_to_json(dr_path, gt_path, temp_file_path, classes):
    """
    Split the detection results by class and write one <class>_dr.json per
    class into *temp_file_path*, sorted by descending confidence.

    JSON format:
    [{"confidence": "0.87", "file_id": "binhelu_SXX2_IMGmono_026995_7417228096150", "bbox": "101 866 790 2976"}, ...]

    :param dr_path: directory holding the detection-result .txt files
    :param gt_path: directory holding the ground-truth .txt files, used to
        verify that every DR file has a matching GT file
    :param temp_file_path: output directory for the per-class json files
    :param classes: iterable of class names to collect
    :return: sorted list of the detection-result .txt file paths
    """
    # All detection-result .txt files, ordered by name.
    dr_list = glob.glob(dr_path + '/*.txt')
    dr_list.sort()

    # For each class, gather every matching detection from every DR file and
    # dump them into <class>_dr.json (confidence / file id / bbox string).
    for class_index, class_name in enumerate(classes):
        bounding_boxes = []
        for txt_file in dr_list:
            # Derive the bare file id (basename without extension), mirroring GT_to_json.
            file_id = os.path.basename(os.path.normpath(txt_file.split(".txt", 1)[0]))
            # Cross-check (only once, on the first class pass) that the
            # matching ground-truth file exists.
            if class_index == 0:
                temp_path = os.path.join(gt_path, (file_id + ".txt"))
                if not os.path.exists(temp_path):
                    error("Error. File not found: {}\n".format(temp_path))

            for line in file_lines_to_list(txt_file):
                try:
                    tmp_class_name, confidence, left, top, right, bottom = line.split()
                except ValueError:
                    # error() terminates the program, so the loop never resumes.
                    error("Error: File " + txt_file + " in the wrong format.\n"
                          " Expected: <class_name> <confidence> <left> <top> <right> <bottom>\n"
                          " Received: " + line)
                if tmp_class_name == class_name:
                    bounding_boxes.append({"confidence": confidence,
                                           "file_id": file_id,
                                           "bbox": left + " " + top + " " + right + " " + bottom})

        # Highest-confidence detections first.
        bounding_boxes.sort(key=lambda box: float(box['confidence']), reverse=True)
        with open(temp_file_path + "/" + class_name + "_dr.json", 'w') as outfile:
            json.dump(bounding_boxes, outfile)

    return dr_list


def voc_ap(rec, prec):
    """
    Compute Average Precision (VOC-style every-point interpolation) from
    cumulative recall/precision curves.

    ``rec``/``prec`` are parallel lists derived from cumulative TP/FP counts:
    with 10 GT boxes and TP = [0,1,1,0,1,0,1,0] the cumulative TP is
    [0,1,2,2,3,3,4,4], recall = TP_cum / 10, and
    precision = TP_cum / (TP_cum + FP_cum).
    AP is the area under the resulting recall/precision curve.

    The algorithm mirrors the official VOC MATLAB code:

        mrec=[0 ; rec ; 1];
        mpre=[0 ; prec ; 0];
        for i=numel(mpre)-1:-1:1
                mpre(i)=max(mpre(i),mpre(i+1));
        end
        i=find(mrec(2:end)~=mrec(1:end-1))+1;
        ap=sum((mrec(i)-mrec(i-1)).*mpre(i));

    :param rec: cumulative recall values (left unmodified)
    :param prec: cumulative precision values (left unmodified)
    :return: (ap, mrec, mpre) — the AP plus the padded, interpolated curves
        used for the integration
    """
    # BUG FIX: work on padded copies instead of mutating the caller's lists
    # in place (the previous implementation did rec.insert/append directly).
    # Recall is padded with 0 and 1 to span the full x-axis; precision is
    # padded with 0 on both ends for the right-to-left max sweep below.
    mrec = [0.0] + list(rec) + [1.0]
    mpre = [0.0] + list(prec) + [0.0]

    # Make precision monotonically non-increasing from right to left:
    # each point becomes the max of itself and everything to its right,
    # e.g. [0, 0.0, 0.5, 0.67, 0.5, 0.6, 0.5, 0.57, 0.5, 0]
    #   -> [0.67, 0.67, 0.67, 0.67, 0.6, 0.6, 0.57, 0.57, 0.5, 0].
    for i in range(len(mpre) - 2, -1, -1):
        mpre[i] = max(mpre[i], mpre[i + 1])

    # Indices where recall actually increases — only those steps add area,
    # e.g. mrec = [0, 0, 0.1, 0.2, 0.2, ..., 1] -> i_list = [2, 3, 5, 7, 9].
    i_list = [i for i in range(1, len(mrec)) if mrec[i] != mrec[i - 1]]

    # AP = sum over the recall steps of (step width) * (interpolated precision).
    ap = 0.0
    for i in i_list:
        ap += ((mrec[i] - mrec[i - 1]) * mpre[i])
    return ap, mrec, mpre


if __name__ == '__main__':
    # Make sure the current working directory is this file's directory.
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    # Runtime switches.
    no_animation = True  # NOTE(review): never read anywhere below — appears unused
    show_animation = True
    draw_plot = True
    specific_iou = {}  # per-class IoU overrides, format class: iou, e.g. {'apple': 0.5}
    ignore = []  # class names skipped when tallying detection results
    sum_AP = 0.0
    # AP per class
    ap_dict = {}
    # log-average miss rate per class
    lamr_dict = {}
    # number of true positives per class
    count_true_positives = {}

    # Create fresh output folders.
    make_dirs(TEMP_FILES_PATH)
    make_dirs(RESULTS_FILES_PATH)
    if show_animation:
        make_dirs(CHECK_PER_IMAGE_PATH)
    if draw_plot:
        make_dirs(REC_PREC_PATH)

    # Convert all GT files to json; returns the list of GT file paths.
    gt_file_list = GT_to_json(gt_path=GT_PATH, dr_path=DR_PATH, temp_file_path=TEMP_FILES_PATH)

    # The class list / class count is taken from the ground truth.
    gt_class = sorted(list(gt_counter_per_class.keys()))
    num_class = len(gt_class)

    # Write all DR results into per-class json files.
    dr_file_list = DR_to_json(dr_path=DR_PATH, gt_path=GT_PATH, temp_file_path=TEMP_FILES_PATH, classes=gt_class)

    # Compute the AP values.
    with open(f"{RESULTS_FILES_PATH}/results.txt", 'w') as results_file:
        results_file.write("# AP and precision/recall per class\n")
        for cls_index, cls_name in enumerate(gt_class):
            count_true_positives[cls_name] = 0
            # Load the detection results of this class.
            dr_data = json.load(open(f"{TEMP_FILES_PATH}/{cls_name}_dr.json"))
            num_dr = len(dr_data)
            # One-hot TP/FP markers per detection; later turned into
            # cumulative sums for the precision/recall curves.
            true_positives = [0] * num_dr
            false_positive = [0] * num_dr
            # Iterate over every detection of this class; each entry is a
            # dict {confidence: str, file_id: str, bbox: str}.
            for idx, dr_result in enumerate(dr_data):
                dr_name = dr_result["file_id"]

                if show_animation:
                    # glob returns a list, which makes it easy to check whether the image exists.
                    optional_img = glob.glob1(IMG_PATH, dr_name + ".*")
                    if len(optional_img) == 0:
                        error(f"Error. Image not found with id: {dr_name}")
                    elif len(optional_img) > 1:
                        error(f"Error. Multiple image with id: {dr_name}")
                    else:
                        # `image` shows the single-detection comparison result.
                        image = cv2.imread(f"{IMG_PATH}/{optional_img[0]}")
                        # `image_cumulative` accumulates all detection boxes of one image.
                        image_cumulative_path = f"{RESULTS_FILES_PATH}/images/{optional_img[0]}"
                        # Earlier iterations may already have drawn on this
                        # image; if so, keep drawing on top of it.
                        if os.path.isfile(image_cumulative_path):
                            image_cumulative = cv2.imread(image_cumulative_path)
                        else:
                            image_cumulative = image.copy()
                        # Add a black strip at the bottom for the status text.
                        bottom_border = 60
                        image = cv2.copyMakeBorder(image, 0, bottom_border, 0, 0, cv2.BORDER_CONSTANT, value=[0, 0, 0])

                # Load the GT boxes of the same image via dr_name.
                gt_file = f"{TEMP_FILES_PATH}/{dr_name}_ground_truth.json"
                gt_data = json.load(open(gt_file))
                # Parse the DR bounding-box coordinates.
                dr_box = [float(x) for x in dr_result["bbox"].split()]
                iou_max = -1  # best IoU between this DR box and any GT box
                iodr_max = -1  # best intersection-over-DR-area ratio
                gt_match = -1  # GT dict of the best-IoU match (-1 while none found)
                for gt_annotation in gt_data:
                    # Compare the DR box with every same-class GT box and keep the best overlap.
                    if gt_annotation["class_name"] == cls_name:
                        gt_box = [float(x) for x in gt_annotation["bbox"].split()]
                        # Intersection rectangle of DR and GT.
                        intersection_box = [max(dr_box[0], gt_box[0]),
                                            max(dr_box[1], gt_box[1]),
                                            min(dr_box[2], gt_box[2]),
                                            min(dr_box[3], gt_box[3])]
                        # Width / height of the intersection rectangle.
                        intersection_w = intersection_box[2] - intersection_box[0]
                        intersection_h = intersection_box[3] - intersection_box[1]
                        if intersection_w > 0 and intersection_h > 0:
                            # IoU = intersection area / union area.
                            dr_area = (dr_box[2] - dr_box[0]) * (dr_box[3] - dr_box[1])
                            gt_area = (gt_box[2] - gt_box[0]) * (gt_box[3] - gt_box[1])
                            intersection_area = intersection_w * intersection_h
                            union_area = dr_area + gt_area - intersection_area
                            # IoU: intersection over union.
                            iou = intersection_area / union_area
                            # IoDR: intersection over the DR box's own area.
                            iodr = intersection_area / dr_area
                            if iou > iou_max:
                                iou_max = iou
                                gt_match = gt_annotation
                            if iodr > iodr_max:
                                iodr_max = iodr

                # Decide whether this is a proper match, a partial cover, or a miss.
                if show_animation:
                    status = "NO MATCH FOUND!"
                # Pick the IoU threshold; per-class overrides take precedence.
                min_iou = MINOVERLAP
                if specific_iou:
                    if cls_name in specific_iou:
                        min_iou = specific_iou[cls_name]
                # Compare iou / iodr against the thresholds.
                if iou_max >= min_iou:
                    # Agrees with the ground-truth annotation.
                    if "difficult" not in gt_match:
                        if not bool(gt_match["used"]):
                            true_positives[idx] = 1
                            gt_match["used"] = True  # gt_match aliases the dict inside gt_data, so gt_data is updated too
                            count_true_positives[cls_name] += 1
                            if not bool(gt_match["occupied"]):
                                gt_match["occupied"] = True
                            else:
                                # NOTE(review): bumping the GT count when the box was
                                # already "occupied" inflates the recall denominator —
                                # presumably to credit split detections; confirm intended
                                gt_counter_per_class[cls_name] += 1
                            with open(gt_file, 'w') as gt_mark:
                                gt_mark.write(json.dumps(gt_data))
                            if show_animation:
                                status = "MATCH!"
                        else:
                            # This GT box was already matched by a previous detection.
                            false_positive[idx] = 1
                            if show_animation:
                                status = "REPEATED MATCH!"
                elif MININTERSECT <= iou_max < min_iou:
                    # IoU is below the threshold but the boxes do overlap.
                    if "difficult" not in gt_match:
                        if iodr_max >= MINCOVER:
                            # The DR box covers only a small part of the GT box
                            # but still sits on the defect: count it as a hit.
                            true_positives[idx] = 1
                            count_true_positives[cls_name] += 1
                            if not bool(gt_match["occupied"]):
                                gt_match["occupied"] = True
                                with open(gt_file, 'w') as gt_mark:
                                    gt_mark.write(json.dumps(gt_data))
                            else:
                                gt_counter_per_class[cls_name] += 1
                            if show_animation:
                                status = "MATCH!"
                        else:
                            # Insufficient cover: no match.
                            false_positive[idx] = 1
                            if show_animation:
                                status = "INSUFFICIENT OVERLAP"
                else:
                    # Everything else is a false positive.
                    false_positive[idx] = 1
                    if iou_max > 0:
                        status = "INSUFFICIENT OVERLAP"

                # Draw the match result onto the image.
                if show_animation:
                    height, width = image.shape[:2]
                    # Colors in OpenCV BGR order.
                    white = (255, 255, 255)  # text color
                    light_blue = (255, 200, 100)  # GT box color
                    green = (0, 255, 0)  # DR box color when it matches a GT box
                    light_red = (30, 30, 255)
                    # First text line.
                    margin = 10
                    v_pos = int(height - margin - (bottom_border / 2.0))
                    text = f"Image: {optional_img[0]} "
                    image, line_width = draw_text_in_image(image, text, (margin, v_pos), white, 0)
                    text = f"Class [{str(cls_index)}/{str(num_class)}]: {cls_name} "
                    image, line_width = draw_text_in_image(image, text, (margin + line_width, v_pos),
                                                           light_blue,
                                                           line_width)
                    if iou_max != -1:
                        if status == "INSUFFICIENT OVERLAP":
                            color = light_red
                            text = "IoU: {0:.2f}% ".format(iou_max * 100) + "< {0:.2f}% ".format(min_iou * 100)
                        else:
                            color = green
                            text = "IoU: {0:.2f}% ".format(iou_max * 100) + ">= {0:.2f}% ".format(min_iou * 100)
                        image, _ = draw_text_in_image(image, text, (margin + line_width, v_pos), color, line_width)
                    # Second text line.
                    v_pos += int(bottom_border / 2.0)
                    rank_pos = str(idx + 1)
                    text = "Detection #rank: " + rank_pos + \
                           " confidence: {0:.2f}% ".format(float(dr_result["confidence"]) * 100)
                    image, line_width = draw_text_in_image(image, text, (margin, v_pos), white, 0)
                    if status == "MATCH!":
                        color = green
                    else:
                        color = light_red
                    text = f"Result: {status} "
                    image, line_width = draw_text_in_image(image, text, (margin + line_width, v_pos), color, line_width)
                    word_font = cv2.FONT_HERSHEY_SIMPLEX
                    if iou_max > 0:
                        gt_box = [int(round(float(x))) for x in gt_match["bbox"].split()]
                        cv2.rectangle(image, (gt_box[0], gt_box[1]), (gt_box[2], gt_box[3]), light_blue, 2)
                        cv2.rectangle(image_cumulative, (gt_box[0], gt_box[1]), (gt_box[2], gt_box[3]), light_blue, 2)
                        cv2.putText(image_cumulative, cls_name, (gt_box[0], gt_box[1] - 5),
                                    word_font, 0.6, light_blue, 1, cv2.LINE_AA)
                    dr_box = [int(i) for i in dr_box]
                    cv2.rectangle(image, (dr_box[0], dr_box[1]), (dr_box[2], dr_box[3]), color, 2)
                    cv2.rectangle(image_cumulative, (dr_box[0], dr_box[1]), (dr_box[2], dr_box[3]), color, 2)
                    cv2.putText(image_cumulative, cls_name, (dr_box[0], dr_box[1] - 5),
                                word_font, 0.6, color, 1, cv2.LINE_AA)
                    # Show and save the annotated images.
                    cv2.namedWindow("Animation", cv2.WINDOW_FREERATIO)
                    cv2.imshow("Animation", image)
                    cv2.waitKey(10)
                    save_path = f"{RESULTS_FILES_PATH}/images/detections_one_by_one/{cls_name}_detection{str(idx)}.jpg"
                    cv2.imwrite(save_path, image)
                    cv2.imwrite(image_cumulative_path, image_cumulative)

            # Turn the one-hot TP/FP markers into cumulative sums.
            cumsum = 0
            for idx, n in enumerate(false_positive):
                false_positive[idx] += cumsum
                cumsum += n
            cumsum = 0
            for idx, n in enumerate(true_positives):
                true_positives[idx] += cumsum
                cumsum += n
            # Recall = cumulative TP / number of GT boxes of this class.
            recall = true_positives[:]
            for idx, _ in enumerate(true_positives):
                recall[idx] = float(true_positives[idx]) / gt_counter_per_class[cls_name]
            # Precision = cumulative TP / (cumulative FP + cumulative TP).
            precision = true_positives[:]
            for idx, _ in enumerate(true_positives):
                precision[idx] = float(true_positives[idx]) / (false_positive[idx] + true_positives[idx])

            average_precision, m_recall, m_precision = voc_ap(recall[:], precision[:])
            sum_AP += average_precision
            text = "{0:.2f}%".format(average_precision * 100) + " = " + cls_name + " AP "
            # Append this class's results to results.txt.
            round_precision = ["%.2f" % elem for elem in precision]
            round_recall = ["%.2f" % elem for elem in recall]
            results_file.write(f"{text}\n Precision: {str(round_precision)}\n Recall: {str(round_recall)}\n\n")
            ap_dict[cls_name] = average_precision
            n_images = counter_images_per_class[cls_name]
            lam_rate, _, _ = log_average_miss_rate(np.array(recall),
                                                   np.array(false_positive),
                                                   n_images)
            lamr_dict[cls_name] = lam_rate

            if draw_plot:
                plt.plot(recall, precision, '-o')
                # Close the polygon under the curve before filling it.
                area_under_curve_x = m_recall[:-1] + [m_recall[-2]] + [m_recall[-1]]
                area_under_curve_y = m_precision[:-1] + [0.0] + [m_precision[-1]]
                plt.fill_between(area_under_curve_x, 0, area_under_curve_y, alpha=0.2, edgecolor='r')
                fig_cls = plt.gcf()
                fig_cls.canvas.manager.set_window_title('AP ' + cls_name)
                plt.title('class: ' + text)
                plt.xlabel('Recall')
                plt.ylabel('Precision')
                axes_cls = plt.gca()
                axes_cls.set_xlim([0.0, 1.0])
                axes_cls.set_ylim([0.0, 1.05])
                fig_cls.savefig(f"{RESULTS_FILES_PATH}/classes/{cls_name}.png")
                plt.cla()

        if show_animation:
            cv2.destroyAllWindows()

        results_file.write("\n# mAP of all classes\n")
        mAP = sum_AP / num_class
        text = "mAP = {0:.2f}%".format(mAP * 100)
        results_file.write(text + "\n")
        print(text)

    # Remove the temporary files folder.
    shutil.rmtree(TEMP_FILES_PATH)

    # Count the number of detections per class over all DR files.
    det_counter_per_class = {}
    for txt in dr_file_list:
        l_list = file_lines_to_list(txt)
        for item in l_list:
            cls_name = item.split()[0]
            if cls_name in ignore:
                continue
            if cls_name in det_counter_per_class:
                det_counter_per_class[cls_name] += 1
            else:
                det_counter_per_class[cls_name] = 1
    dr_class = list(det_counter_per_class.keys())

    # Plot the per-class GT counts as a bar chart.
    if draw_plot:
        window_titles = "ground-truth-info"
        plot_titles = "ground-truth\n"
        plot_titles += f"({str(len(gt_file_list))} files and {str(num_class)} classes)"
        x_axis_label = "Number of objects per class"
        output_image_path = f"{RESULTS_FILES_PATH}/ground-truth-info.png"
        show_flag = False
        plot_colors = 'forestgreen'
        draw_plot_func(
            gt_counter_per_class,
            num_class,
            window_titles,
            plot_titles,
            x_axis_label,
            output_image_path,
            show_flag,
            plot_colors,
            '',
        )
    # Append the per-class GT box counts to results.txt.
    with open(f"{RESULTS_FILES_PATH}/results.txt", 'a') as results_file:
        results_file.write("\n# Number of ground-truth objects per class\n")
        for cls_name in sorted(gt_counter_per_class):
            results_file.write(f"{cls_name}: {str(gt_counter_per_class[cls_name])}\n")
    # Classes that were detected but never appear in the GT get a zero TP count.
    for cls_name in dr_class:
        if cls_name not in gt_class:
            count_true_positives[cls_name] = 0

    # Plot the per-class detection TP/FP stacked bar chart.
    if draw_plot:
        window_titles = "detection-results-info"
        # Plot title
        plot_titles = "detection-results\n"
        plot_titles += f"({str(len(dr_file_list))}files and "
        count_non_zero_values_in_dictionary = sum(int(x) > 0 for x in list(det_counter_per_class.values()))
        plot_titles += f"{str(count_non_zero_values_in_dictionary)} detected classes)"
        # end Plot title
        x_axis_label = "Number of objects per class"
        output_image_path = f"{RESULTS_FILES_PATH}/detection-results-info.png"
        show_flag = False
        plot_colors = 'forestgreen'
        true_plot_bar = count_true_positives
        draw_plot_func(
            det_counter_per_class,
            len(det_counter_per_class),
            window_titles,
            plot_titles,
            x_axis_label,
            output_image_path,
            show_flag,
            plot_colors,
            true_plot_bar
        )

    # Append the per-class detection counts (with TP/FP split) to results.txt.
    with open(f"{RESULTS_FILES_PATH}/results.txt", 'a') as results_file:
        results_file.write("\n# Number of detected objects per class\n")
        for cls_name in sorted(dr_class):
            n_det = det_counter_per_class[cls_name]
            text = cls_name + ": " + str(n_det)
            text += " (tp:" + str(count_true_positives[cls_name]) + ""
            text += ", fp:" + str(n_det - count_true_positives[cls_name]) + ")\n"
            results_file.write(text)

    # Plot the per-class log-average miss rate bar chart.
    if draw_plot:
        window_titles = "lamr"
        plot_titles = "log-average miss rate"
        x_axis_label = "log-average miss rate"
        output_image_path = f"{RESULTS_FILES_PATH}/lamr.png"
        show_flag = False
        plot_colors = 'royalblue'
        draw_plot_func(
            lamr_dict,
            num_class,
            window_titles,
            plot_titles,
            x_axis_label,
            output_image_path,
            show_flag,
            plot_colors,
            ""
        )

    # Plot the per-class AP bar chart (this one is shown interactively).
    if draw_plot:
        window_titles = "mAP"
        plot_titles = "mAP = {0:.2f}%".format(mAP * 100)
        x_axis_label = "Average Precision"
        output_image_path = f"{RESULTS_FILES_PATH}/mAP.png"
        show_flag = True
        plot_colors = 'royalblue'
        draw_plot_func(
            ap_dict,
            num_class,
            window_titles,
            plot_titles,
            x_axis_label,
            output_image_path,
            show_flag,
            plot_colors,
            ""
        )