# Bounding-box detection evaluation: several metrics.
# precision, recall, IoU, CLS, F1_score (whether the classification is correct).
# The evaluation data comes from the ExcelChart 400K dataset; different metrics
# are computed in different ways.
# Plot-area detection must be as precise as possible, because data values are
# computed from the plot-area coordinates.

# For legend, title, etc. a rough box is enough, as long as it encloses the
# content. The InnerPlotArea, which is involved in value computation, must be
# detected precisely.
from .benchmark import Benchmark
import os
import json
from .utils import combine_cls_predict

# Numeric category ids for chart components. Ids are assigned in list order;
# only ids below 7 are evaluated by ClsBenchmark (see the `LABELS[key] < 7`
# filter in `_eval`).
LABELS = {
    name: category_id
    for category_id, name in enumerate((
        "Legend",
        "ValueTitle",
        "Title",
        "CategoryTitle",
        "BarCanvas",
        "LineCanvas",
        "PieCanvas",
        "Bar",
        "Line",
        "Pie",
        "Pictogram",
    ))
}


class ClsBenchmark(Benchmark):
    """Benchmark for chart-component bounding-box detection.

    Compares predicted component boxes (legend, titles, plot areas) against
    ground truth and accumulates per-category recognition rate, IoU,
    precision, recall and confidence statistics. All boxes are
    ``[x0, y0, w, h]``.
    """

    # Only LABELS ids 0-6 (legend, titles, plot areas) are evaluated here.
    NUM_CATEGORIES = 7

    def __init__(self):
        super().__init__()
        self.pred = None  # combined predictions, keyed like self.gt
        self.gt = None  # ground truth, keyed by image id

    def get_area(self, box1, box2):
        """Return ``(intersection_area, area(box1), area(box2))``.

        Returns ``(0, 0, 0)`` when the two boxes do not intersect.
        """
        if not self.isIntersect(box1, box2):
            return 0, 0, 0

        # Intersection rectangle: max of the top-left corners, min of the
        # bottom-right corners (non-empty because isIntersect is True, so
        # the abs() of the original implementation is unnecessary).
        x1 = max(box1[0], box2[0])
        y1 = max(box1[1], box2[1])
        x2 = min(box1[0] + box1[2], box2[0] + box2[2])
        y2 = min(box1[1] + box1[3], box2[1] + box2[3])

        intersect_area = (x2 - x1) * (y2 - y1)
        return intersect_area, box1[2] * box1[3], box2[2] * box2[3]

    @staticmethod
    def isIntersect(box1, box2) -> bool:
        """Return True if the two rectangles intersect.

        Principle: two rectangles intersect iff the x-distance between their
        centers is smaller than the sum of their half-widths AND the
        y-distance between their centers is smaller than the sum of their
        half-heights (the expression below is that test multiplied by 2 to
        avoid fractions).

        box1, box2: ``[x0, y0, w, h]``
        """
        return (abs(2 * box1[0] - 2 * box2[0] + box1[2] - box2[2]) - box1[2] - box2[2]) < 0 \
            and (abs(2 * box1[1] - 2 * box2[1] + box1[3] - box2[3]) - box1[3] - box2[3]) < 0

    def load(self, yolo_pred_path: str, corner_pred_path: str, cached_pred_path: str, gt_path: str):
        """Load ground truth and the three prediction dumps.

        The three prediction files are merged into the internal format via
        ``combine_cls_predict``.
        """
        with open(gt_path, "r") as fp:
            self.gt = json.load(fp)

        with open(yolo_pred_path, "r") as fp:
            yolo_pred_results = json.load(fp)

        with open(corner_pred_path, "r") as fp:
            corner_pred_results = json.load(fp)

        with open(cached_pred_path, "r") as fp:
            cached_pred_results = json.load(fp)

        self.pred = combine_cls_predict(yolo_pred_results, corner_pred_results, cached_pred_results)

    def _eval(self, IOU_threshold: float = 0.5):
        """Accumulate per-category statistics over every ground-truth image.

        A category counts as recognized only when the predicted box reaches
        ``IOU_threshold`` against the ground-truth box.

        Returns a tuple of
        ``(component_recognize, component_iou, component_precision,
           component_recall, component_confidence, time_usage)``
        where ``component_recognize[c]`` is ``[attempted, matched]`` and the
        other ``component_*`` entries are per-category sample lists.
        """
        n = self.NUM_CATEGORIES
        component_recognize = [[0, 0] for _ in range(n)]
        component_iou = [[] for _ in range(n)]
        component_precision = [[] for _ in range(n)]
        component_recall = [[] for _ in range(n)]
        component_confidence = [[] for _ in range(n)]
        time_usage = []
        # Images whose box for a category was missing or below the threshold.
        fail_rec_img = [[] for _ in range(n)]
        # [images with any title-type ground truth, images where one matched]
        title_rec = [0, 0]

        for gt_key, gt_value in self.gt.items():
            pred_value = self.pred[gt_key]

            # BUGFIX: was a bare `except:` around the lookup, which would
            # also hide unrelated errors; test for the key explicitly.
            if "data_type" not in pred_value:
                print("no data type")
                continue

            gt_map = {LABELS[key]: gt_value[key]
                      for key in gt_value
                      if key in LABELS and LABELS[key] < n}
            pred_map = {int(key): pred_value[key]
                        for key in pred_value if key != "data_type"}

            time_usage.append(0)

            title_type_rec = False
            have_title_type = False
            for category_id, gt_bbox in gt_map.items():
                # Category ids 1-3 are the title components.
                if 1 <= category_id <= 3:
                    have_title_type = True

                # One attempt per ground-truth component, matched or not.
                component_recognize[category_id][0] += 1

                if category_id not in pred_map:
                    fail_rec_img[category_id].append(gt_key)
                    continue

                # Prediction entries are [x0, y0, w, h, ..., confidence].
                pred_bbox = pred_map[category_id][0:4]
                intersect_area, gt_area, pred_area = self.get_area(gt_bbox, pred_bbox)
                IOU = 0 if intersect_area == 0 else \
                    intersect_area / (gt_area + pred_area - intersect_area)

                if IOU < IOU_threshold:
                    fail_rec_img[category_id].append(gt_key)
                    continue

                component_recognize[category_id][1] += 1
                # BUGFIX: precision and recall were swapped in the original;
                # precision = overlap / predicted area,
                # recall = overlap / ground-truth area.
                component_precision[category_id].append(intersect_area / pred_area)
                component_recall[category_id].append(intersect_area / gt_area)
                component_confidence[category_id].append(pred_map[category_id][-1])
                component_iou[category_id].append(IOU)
                if 1 <= category_id <= 3:
                    title_type_rec = True

            if have_title_type:
                title_rec[0] += 1
            if title_type_rec:
                title_rec[1] += 1

        # Guard against datasets with no title-type components at all.
        if title_rec[0]:
            print(title_rec[1] / title_rec[0])
        return (component_recognize, component_iou, component_precision,
                component_recall, component_confidence, time_usage)

    def eval(self, IOU_threshold: float = 0.5, store_file: str = "cls_benchmark.json"):
        """Run the evaluation, print a per-category summary table and store
        both the raw samples and the averaged percentages as JSON in
        ``store_file``."""
        (component_recognize, component_iou, component_precision, component_recall,
         component_confidence, time_usage) = self._eval(IOU_threshold)
        print(component_recognize)

        def _avg_pct(samples):
            # Mean of a sample list scaled to percent; 0 for empty lists.
            return (sum(samples) / len(samples)) * 100 if samples else 0

        # Guard against categories with zero ground-truth instances
        # (item[0] == 0 would divide by zero).
        component_recognize_rate = [(item[1] / item[0]) * 100 if item[0] else 0
                                    for item in component_recognize]
        component_iou_average = [_avg_pct(item) for item in component_iou]
        component_precision_average = [_avg_pct(item) for item in component_precision]
        component_recall_average = [_avg_pct(item) for item in component_recall]
        component_confidence_average = [_avg_pct(item) for item in component_confidence]
        time_usage_per_image_ms = sum(time_usage) / len(time_usage) if time_usage else 0

        category_name = ["Legend", "ValueAxisTitle", "ChartTitle", "CategoryAxisTitle",
                         "BarPlotArea", "LinePlotArea", "PiePlotArea"]

        print("summary\trecognize_rate\tiou_average\tprecision_average\trecall_average\tconfidence_average")
        for i in range(self.NUM_CATEGORIES):
            print(category_name[i], end="\t")
            print("%.4f" % component_recognize_rate[i], end="\t")
            print("%.4f" % component_iou_average[i], end="\t")
            print("%.4f" % component_precision_average[i], end="\t")
            print("%.4f" % component_recall_average[i], end="\t")
            print("%.4f" % component_confidence_average[i], end="\t")
            print("")
        print("time usage per image: %.2fms" % time_usage_per_image_ms)

        with open(f"{store_file}", "w") as fp:
            json.dump({
                "raw": {
                    "component_recognize": component_recognize,
                    "component_iou": component_iou,
                    "component_precision": component_precision,
                    "component_recall": component_recall,
                    "component_confidence": component_confidence,
                    "time_usage": time_usage
                },
                "average": {"recognize_rate": component_recognize_rate,
                            "iou_average": component_iou_average,
                            "precision_average": component_precision_average,
                            "recall_average": component_recall_average,
                            "confidence_average": component_confidence_average}
            }, fp)


if __name__ == "__main__":
    # NOTE(review): load() requires four files (yolo, corner and cached
    # predictions plus ground truth); the original call passed only two and
    # raised TypeError. The corner/cached paths below are guesses following
    # the yolo naming scheme — TODO confirm before running.
    gt_result_path = r"D:\Code\script\_banchmark\benchmark\dataset\CV\cls\labels\val.json"
    yolo_pred_path = r"D:\Code\script\_banchmark\benchmark\temp\CV\cls\yolov7_pred.json"
    corner_pred_path = r"D:\Code\script\_banchmark\benchmark\temp\CV\cls\corner_pred.json"
    cached_pred_path = r"D:\Code\script\_banchmark\benchmark\temp\CV\cls\cached_pred.json"
    yolo_cls_benchmark = ClsBenchmark()
    yolo_cls_benchmark.load(yolo_pred_path, corner_pred_path, cached_pred_path, gt_result_path)
    yolo_cls_benchmark.eval()
