import numpy as np
import tqdm  # progress bar
import cv2


# # import torch
# # 计算两个点坐标的box的交并比，被get_batch_statistics，non_max_suppression，build_targets引用
# # 无论两个矩形框的表示形式是怎样的，都要转化成左上角右下角 xyxy 的格式。
# # 求左边界和上边界用max
# # 求右边界和下边界用min
# # 相交的宽度为负时，赋值为 0
# # box 的宽度等于右坐标 - 左坐标 + 1
# def intersection_area(label_box, detect_box):
#     x_label_min, y_label_min, x_label_max, y_label_max = label_box
#     x_detect_min, y_detect_min, x_detect_max, y_detect_max = detect_box[0]
#     if (x_label_max <= x_detect_min or x_detect_max < x_label_min) or (
#             y_label_max <= y_detect_min or y_detect_max <= y_label_min):
#         return 0
#     else:
#         lens = min(x_label_max, x_detect_max) - max(x_label_min, x_detect_min)
#         wide = min(y_label_max, y_detect_max) - max(y_label_min, y_detect_min)
#         return lens * wide
#
#
# def union_area(label_box, detect_box):
#     x_label_min, y_label_min, x_label_max, y_label_max = label_box
#     x_detect_min, y_detect_min, x_detect_max, y_detect_max = detect_box[0]
#
#     area_label = (x_label_max - x_label_min) * (y_label_max - y_label_min)
#     area_detect = (x_detect_max - x_detect_min) * (y_detect_max - y_detect_min)
#     inter_area = intersection_area(label_box, detect_box)
#
#     area_union = area_label + area_detect - inter_area
#
#     return area_union
# def bbox_iou(box_pre, box_gts, x1y1x2y2=True):
#     """
#     Returns the IoU of two bounding boxes
#     """
#     ious = []
#     for box in box_gts:
#         i_area = intersection_area(box, box_pre)
#         u_area = union_area(box, box_pre)
#         iou = i_area / u_area
#         ious.append(iou)
#     return ious

def draw_region(img, rect_1, rect_2=None, fill_value=255):
    """Rasterize one or two quadrilaterals onto `img`.

    Each rect is a flat sequence [x0, y0, x1, y1, x2, y2, x3, y3] listing
    the four corner points in order; the enclosed region is filled with
    `fill_value` via cv2.fillPoly.  Returns the (mutated) image.
    """
    corners = [(rect_1[2 * k], rect_1[2 * k + 1]) for k in range(4)]
    cv2.fillPoly(img, [np.array(corners)], fill_value)
    if rect_2 is not None:
        corners_2 = [(rect_2[2 * k], rect_2[2 * k + 1]) for k in range(4)]
        cv2.fillPoly(img, [np.array(corners_2)], fill_value)
    return img


def compute_iou(net_width, net_height, rect1, rect2, fill_value=255):
    """Compute the IoU of two quadrilaterals by rasterization.

    Each rect is a flat [x0, y0, ..., x3, y3].  Both shapes are drawn on
    blank (net_height, net_width) canvases; pixel counting then yields
    |A|, |B| and |A U B|, from which IoU = |A n B| / |A U B|.

    # Arguments
        net_width, net_height: canvas size (floats are accepted and truncated).
        rect1, rect2:          the two quadrilaterals.
        fill_value:            pixel value used to mark filled regions.
    # Returns
        IoU as a float; 0.0 when the union is empty (both shapes degenerate).
    """
    # BUGFIX: np.zeros requires integer dimensions, but callers pass raw
    # (possibly float) box coordinates as the canvas size.
    net_width, net_height = int(net_width), int(net_height)
    img1 = np.zeros((net_height, net_width), np.uint8)
    img2 = np.zeros((net_height, net_width), np.uint8)
    img3 = np.zeros((net_height, net_width), np.uint8)
    out_img1 = draw_region(img1, rect1, fill_value=fill_value)
    out_img2 = draw_region(img2, rect2, fill_value=fill_value)
    # Drawing both shapes on one canvas gives the union region.
    out_img3 = draw_region(img3, rect1, rect2, fill_value=fill_value)
    area_1 = np.sum(out_img1 == fill_value)
    area_2 = np.sum(out_img2 == fill_value)
    area_union = np.sum(out_img3 == fill_value)
    if area_union == 0:
        # Both shapes rasterize to zero area; IoU is undefined, report 0.
        return 0.0
    # |A n B| = |A| + |B| - |A U B|
    iou = (area_1 + area_2 - area_union) * 1.0 / area_union
    return iou


# IoU between irregular quadrilaterals (computed by rasterization)
def bbox_iou2(box_pre, box_gts, x1y1x2y2=True):
    """Return the rasterized IoU of one predicted quadrilateral against
    each ground-truth quadrilateral.

    # Arguments
        box_pre:  flat [x0, y0, ..., x3, y3], optionally wrapped in a
                  1-element sequence (as produced by ``zip(pred_boxes)``).
        box_gts:  iterable of GT boxes, each as four corner points
                  [[x0, y0], [x1, y1], [x2, y2], [x3, y3]].
        x1y1x2y2: unused; kept for signature compatibility.
    # Returns
        List of IoU floats, one per ground-truth box.
    """
    ious = []
    if len(box_pre) == 1:
        box_pre = box_pre[0]
    for box in box_gts:
        if not isinstance(box, list):
            box = box.tolist()
        # Flatten the four corner points into [x0, y0, ..., x3, y3].
        box_gt = box[0] + box[1] + box[2] + box[3]
        # Canvas must cover the largest coordinate of either box.
        # x coordinates sit at even indices, y coordinates at odd indices.
        # BUGFIX: height previously mixed in box_gt[2] (an x coordinate)
        # instead of box_gt[3]; also +1 so the outermost pixel row/column is
        # not clipped, and int() so float coordinates don't crash np.zeros.
        width = int(max(box_gt[0], box_gt[2], box_gt[4], box_gt[6],
                        box_pre[0], box_pre[2], box_pre[4], box_pre[6])) + 1
        height = int(max(box_gt[1], box_gt[3], box_gt[5], box_gt[7],
                         box_pre[1], box_pre[3], box_pre[5], box_pre[7])) + 1
        ious.append(compute_iou(width, height, box_gt, box_pre))

    return ious


# def voc_ap(rec, prec):
#     """
#     --- Official matlab code VOC2012---
#     mrec=[0 ; rec ; 1];
#     mpre=[0 ; prec ; 0];
#     for i=numel(mpre)-1:-1:1
#             mpre(i)=max(mpre(i),mpre(i+1));
#     end
#     i=find(mrec(2:end)~=mrec(1:end-1))+1;
#     ap=sum((mrec(i)-mrec(i-1)).*mpre(i));
#     """
#     mrec = np.concatenate(([0.0], rec, [1.0]))
#     mpre = np.concatenate(([0.0], prec, [0.0]))
#
#     """
#      This part makes the precision monotonically decreasing
#         (goes from the end to the beginning)
#         matlab: for i=numel(mpre)-1:-1:1
#                     mpre(i)=max(mpre(i),mpre(i+1));
#     """
#     # matlab indexes start in 1 but python in 0, so I have to do:
#     #     range(start=(len(mpre) - 2), end=0, step=-1)
#     # also the python function range excludes the end, resulting in:
#     #     range(start=(len(mpre) - 2), end=-1, step=-1)
#     for i in range(len(mpre) - 2, -1, -1):
#         mpre[i] = max(mpre[i], mpre[i + 1])
#     """
#      This part creates a list of indexes where the recall changes
#         matlab: i=find(mrec(2:end)~=mrec(1:end-1))+1;
#     """
#     i_list = []
#     for i in range(1, len(mrec)):
#         if mrec[i] != mrec[i - 1]:
#             i_list.append(i)  # if it was matlab would be i + 1
#     """
#      The Average Precision (AP) is the area under the curve
#         (numerical integration)
#         matlab: ap=sum((mrec(i)-mrec(i-1)).*mpre(i));
#     """
#     ap = 0.0
#     for i in i_list:
#         ap += ((mrec[i] - mrec[i - 1]) * mpre[i])
#     return ap, mrec, mpre


def compute_ap(recall, precision):
    """ Compute the average precision, given the recall and precision curves.
    Code originally from https://github.com/rbgirshick/py-faster-rcnn.

    # Arguments
        recall:    The recall curve (list).
        precision: The precision curve (list).
    # Returns
        The average precision as computed in py-faster-rcnn.
    """
    # Sentinel values: the curve starts at recall 0 and ends at recall 1.
    mrec = np.concatenate(([0.0], recall, [1.0]))
    mpre = np.concatenate(([0.0], precision, [0.0]))

    # Precision envelope: make mpre monotonically non-increasing by taking
    # the running maximum from right to left (vectorized backwards sweep).
    mpre = np.flip(np.maximum.accumulate(np.flip(mpre)))

    # Indices where the recall value changes -- only these steps add area
    # under the precision/recall curve.
    change = np.flatnonzero(mrec[1:] != mrec[:-1])

    # Sum (delta recall) * precision over the step function.
    return np.sum((mrec[change + 1] - mrec[change]) * mpre[change + 1])


# ------------------------------------------ The following functions compute performance metrics ---------------------
# Computes per-class prediction accuracy; referenced by test.py
# Inputs: true positives, confidences, predicted classes, list of ground-truth classes
# Outputs: precision, recall, AP, f1, ap_class statistics
def ap_per_class(tp, conf, pred_cls, target_cls):
    """ Compute the average precision, given the recall and precision curves.
    Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.
    # Arguments
        tp:    True positives (list).
        conf:  Objectness value from 0-1 (list).
        pred_cls: Predicted object classes (list).
        target_cls: True object classes (list).
    # Returns
        The average precision as computed in py-faster-rcnn.
    """
    # Sort detections by descending objectness score.
    order = np.argsort(-conf)
    tp, conf, pred_cls = tp[order], conf[order], pred_cls[order]

    # One PR curve / AP value per ground-truth class.
    unique_classes = np.unique(target_cls)

    ap, p, r = [], [], []
    for c in tqdm.tqdm(unique_classes, desc="Computing AP"):
        mask = pred_cls == c
        n_gt = (target_cls == c).sum()  # number of ground-truth objects
        n_p = mask.sum()  # number of predicted objects

        if n_p == 0 and n_gt == 0:
            continue
        if n_p == 0 or n_gt == 0:
            # Either no predictions or no targets: everything scores zero.
            ap.append(0)
            r.append(0)
            p.append(0)
            continue

        # Cumulative FP/TP counts over the (confidence-sorted) detections
        # of class c; the last element is the class total.
        fpc = (1 - tp[mask]).cumsum()
        tpc = (tp[mask]).cumsum()

        # Recall curve; epsilon avoids division by zero.
        recall_curve = tpc / (n_gt + 1e-16)
        r.append(recall_curve[-1])

        # Precision curve.
        precision_curve = tpc / (tpc + fpc)
        p.append(precision_curve[-1])

        # AP is the area under the recall-precision curve.
        ap.append(compute_ap(recall_curve, precision_curve))

    # F1 is the harmonic mean of precision and recall.
    p, r, ap = np.array(p), np.array(r), np.array(ap)
    f1 = 2 * p * r / (p + r + 1e-16)

    return p, r, ap, f1, unique_classes.astype("int32")


# Per-image computation
#         # true_positives: 1 where the predicted box is correct, 0 otherwise
#         # pred_scores: confidence of each predicted box
#         # pred_labels: class label of each predicted box
def get_per_img_statistics(output, label_box, iou_threshold, class_pre, class_label):
    """Per-image detection statistics, computed class by class.

    # Arguments
        output:        predicted boxes for this image (quadrilaterals).
        label_box:     ground-truth boxes for this image.
        iou_threshold: minimum IoU for a prediction to count as a match.
        class_pre:     predicted class id per predicted box (array-like).
        class_label:   ground-truth class id per ground-truth box (array-like).
    # Returns
        List with one [true_positives, pre_box_iou, class_ids] entry per
        ground-truth class, where true_positives[i] is 1 if prediction i
        matched an unclaimed GT box, pre_box_iou[i] is the best IoU of
        prediction i (used in place of a confidence score), and class_ids
        repeats the class id once per processed prediction.
    """
    batch_metrics = []
    unique_classes = np.unique(class_label)
    for class_id in unique_classes:
        # Restrict ground truth and predictions to the current class.
        annotations = np.array(label_box)[class_label == class_id]
        pred_boxes = np.array(output)[class_pre == class_id]
        true_positives = np.zeros(len(pred_boxes))  # 1 where a prediction matches a GT box
        pre_box_iou = np.zeros(len(pred_boxes))  # best IoU per prediction (confidence substitute)
        # BUGFIX: initialize outside the `if` so the append below never hits
        # an undefined name when a class has no annotations (and to stay
        # consistent with get_per_img_statistics_tab).
        class_ids = []
        if len(annotations):
            detected_boxes = []  # indices of GT boxes already claimed
            for pred_i, (pred_box) in enumerate(zip(pred_boxes)):
                class_ids += [class_id]
                # Once every GT box has been claimed, the remaining
                # predictions are left unmatched (IoU stays 0).
                if len(detected_boxes) == len(annotations):
                    continue
                ious = np.array(bbox_iou2(pred_box, annotations))
                iou = np.max(ious)
                box_index = np.argmax(ious)
                pre_box_iou[pred_i] = iou

                # Best IoU above threshold and GT box still unclaimed: count
                # as found.  Each GT box may be claimed by at most one
                # prediction, so remember box_index.
                if iou >= iou_threshold and box_index not in detected_boxes:
                    true_positives[pred_i] = 1
                    detected_boxes += [box_index]
        batch_metrics.append([true_positives, pre_box_iou, class_ids])
    return batch_metrics


def get_per_img_statistics_tab(output, label_box, iou_threshold, class_pre, class_label):
    """Per-image detection statistics plus the matched GT indices of class 2.

    Same matching logic as get_per_img_statistics; additionally returns
    tp_index_table, the ground-truth indices that were matched for class
    id 2 (presumably the table class -- TODO confirm the class mapping).

    # Arguments
        output:        predicted boxes for this image (quadrilaterals).
        label_box:     ground-truth boxes for this image.
        iou_threshold: minimum IoU for a prediction to count as a match.
        class_pre:     predicted class id per predicted box (array-like).
        class_label:   ground-truth class id per ground-truth box (array-like).
    # Returns
        (batch_metrics, tp_index_table) where batch_metrics has one
        [true_positives, pre_box_iou, class_ids] entry per ground-truth
        class and tp_index_table lists the GT indices matched for class 2.
    """
    batch_metrics = []
    unique_classes = np.unique(class_label)
    tp_index_table = []
    for class_id in unique_classes:
        # Restrict ground truth and predictions to the current class.
        annotations = np.array(label_box)[class_label == class_id]
        pred_boxes = np.array(output)[class_pre == class_id]
        true_positives = np.zeros(len(pred_boxes))  # 1 where a prediction matches a GT box
        pre_box_iou = np.zeros(len(pred_boxes))  # best IoU per prediction (confidence substitute)
        class_ids = []

        if len(annotations):
            detected_boxes = []  # indices of GT boxes already claimed
            for pred_i, (pred_box) in enumerate(zip(pred_boxes)):
                class_ids += [class_id]
                # Once every GT box has been claimed, the remaining
                # predictions are left unmatched (IoU stays 0).
                if len(detected_boxes) == len(annotations):
                    continue
                ious = np.array(bbox_iou2(pred_box, annotations))
                iou = np.max(ious)
                box_index = np.argmax(ious)
                pre_box_iou[pred_i] = iou

                # Best IoU above threshold and GT box still unclaimed: count
                # as found; each GT box may be claimed by only one prediction.
                if iou >= iou_threshold and box_index not in detected_boxes:
                    true_positives[pred_i] = 1
                    detected_boxes += [box_index]
            # Plain int comparison; wrapping 2 in np.array() was redundant.
            if class_id == 2:
                tp_index_table = detected_boxes
        batch_metrics.append([true_positives, pre_box_iou, class_ids])
    return batch_metrics, tp_index_table



# Additionally computes the text edit distance for matched boxes
def get_per_img_statistics_editdist(output, annotations, iou_threshold,pre_chars,gt_chars,diff_dir = "./"):
    """Per-image detection statistics plus a recognition score per matched box.

    # Arguments
        output:        predicted boxes for this image (quadrilaterals).
        annotations:   ground-truth boxes.
        iou_threshold: minimum IoU for a prediction to count as a match.
        pre_chars:     recognized text per predicted box.
        gt_chars:      ground-truth text per ground-truth box.
        diff_dir:      directory forwarded to the edit-distance helper.
    # Returns
        [[true_positives, pre_box_iou, recogn_score]] where, per matched
        box, recogn_score holds 1 - edit_distance / len(gt_text), clamped
        to be non-negative.
    """

    pred_boxes = output  # predicted boxes (x, y, w, h) of the current image

    # One entry per predicted box.
    true_positives = np.zeros(len(pred_boxes))  # 1 where a prediction matches a GT box, else 0
    pre_box_iou = np.zeros(len(pred_boxes))  # best IoU per prediction, used in place of a confidence score

    batch_metrics = []
    recogn_score = []
    len_chars = []
    if len(annotations):
        detected_boxes = []  # indices of GT boxes already claimed
        target_boxes = annotations

        for pred_i, pred_box in enumerate(zip(pred_boxes)):
            # Stop once every GT box has been claimed.
            if len(detected_boxes) == len(annotations):
                break

            # Ignore if label is not one of the target labels
            # if pred_label not in target_labels:  # skip predictions outside target_labels
            #     continue

            # Compute IoU against every GT box and keep the best; remember its
            # index so multiple predictions cannot claim the same GT box.
            ious = np.array(bbox_iou2(pred_box, target_boxes))
            iou =np.max(ious)
            box_index = np.argmax(ious)

            # Use the IoU as the confidence score.
            pre_box_iou[pred_i] = iou
            if iou >= iou_threshold and box_index not in detected_boxes:
                # Best IoU above threshold and GT box still unclaimed: count it as found.
                true_positives[pred_i] = 1  # mark prediction pred_i as a true positive
                detected_boxes += [box_index]  # claim GT box box_index so no other prediction can match it
                prestr = pre_chars[pred_i]
                gdtstr = gt_chars[box_index]
                from rec.recongnize import damerau_levenshtein_distance
                dist = damerau_levenshtein_distance(prestr, gdtstr, diff_dir=diff_dir)
                score = 0
                # Normalized recognition score, clamped at zero.
                if 1.0 - dist / len(gt_chars[box_index]) > 0:
                    score = 1.0 - dist / len(gt_chars[box_index])
                recogn_score.append(score)
                len_chars.append(len(gt_chars[box_index]))
    batch_metrics.append([true_positives, pre_box_iou, recogn_score])  # per-image evaluation record
    # true_positives: 1 where the predicted box is correct, 0 otherwise
    # pred_scores: confidence of each predicted box (IoU stands in for it)
    return batch_metrics  # overall evaluation info for this image

def getpr(true_positives, num_label):
    """Overall precision and recall from per-prediction TP flags.

    # Arguments
        true_positives: iterable of 0/1 flags, one per prediction.
        num_label:      total number of ground-truth boxes.
    # Returns
        (precision, recall); 0.0 for either when its denominator is zero.
    """
    tp = sum(true_positives)
    tp_fp = len(true_positives)
    # Guard against no predictions / no labels (previously ZeroDivisionError).
    precision = tp / tp_fp if tp_fp else 0.0
    recall = tp / num_label if num_label else 0.0
    return precision, recall

