# -*- coding: utf-8 -*-
# @Time    : 2018/11/16 11:01
# @Author  : cj
# @File    : test.py
import os

import cv2
import numpy as np

# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
from src.utils.basic_utils import DetectResult
from src.server import ShelfServer
from src.utils.context import Context
from src.detection.utils.io import get_label_from_pd_file
from src.detection.utils.io import get_label_list_from_category_index
from src.detection.utils.evaluator import eval_detect_result
from src.detection.utils.evaluator import read_xml_as_eval_info
import glob
from src.detection.predict_caffe import compute_recall_and_precision


def get_overlap_area(rect1, rect2):
    """
    Compute the area of the region shared by two rectangles; 0 if disjoint.

    Parameters
    ----------
    rect1: rectangle as [axis1_min, axis2_min, axis1_max, axis2_max]
    rect2: rectangle as [axis1_min, axis2_min, axis1_max, axis2_max]

    Returns
    -------
    The intersection area, or 0.0 when the rectangles do not overlap
    (touching edges count as no overlap, per is_overlap).
    """
    if not is_overlap(rect1, rect2):
        return 0.0
    # Intersection extent along each axis: right/bottom edge of the
    # overlap minus its left/top edge.
    overlap_width = min(rect1[2], rect2[2]) - max(rect1[0], rect2[0])
    overlap_height = min(rect1[3], rect2[3]) - max(rect1[1], rect2[1])
    return overlap_width * overlap_height


def is_overlap(rect1, rect2):
    """
    Tell whether two rectangles share a region of positive area.

    Parameters
    ----------
    rect1: rectangle as [axis1_min, axis2_min, axis1_max, axis2_max]
    rect2: rectangle as [axis1_min, axis2_min, axis1_max, axis2_max]

    Returns
    -------
    True when the rectangles overlap; rectangles that merely touch on an
    edge are considered non-overlapping.
    """
    # De Morgan form of "not separated along either axis": each rectangle's
    # min edge must lie strictly before the other's max edge on both axes.
    return (rect1[0] < rect2[2] and
            rect1[1] < rect2[3] and
            rect1[2] > rect2[0] and
            rect1[3] > rect2[1])


class EvaluateDetectResult:
    """Evaluate one image's detection result against its ground-truth XML annotation."""
    def __init__(self, detect_result: DetectResult, xml_file: str):
        self._detect_result = detect_result  # detection output (positions / scores / labels)
        self._xml_file = xml_file  # path to the ground-truth annotation XML for the same image

    def evaluate(self):
        """Match predictions against the XML ground truth.

        Returns
        -------
        (gt, pred): the paired class lists produced by eval_detect_result,
        with classes shifted to be 1-based so 0 can act as the
        "unmatched" default class.
        """
        # NOTE(review): hard-coded label-map path and size — parameterize before reuse.
        label_file = r'D:\workspace\Shelf-product-identification\pdtxt\label_map_mifen.pdtxt'
        category_index = get_label_from_pd_file(label_file, 476)  # 476 presumably the number of classes — confirm
        label_list = get_label_list_from_category_index(category_index)
        boxes = []
        positions = self._detect_result.positions
        _classes = []
        _boxes = []
        _scores = []
        for box in positions:  # box: [[x1, y1], [x2, y2], ... ]
            # Corners 0 and 2 are used as opposite corners; coordinates are
            # swapped into [ymin, xmin, ymax, xmax] order.
            boxes.append(np.array([box[0][1], box[0][0], box[2][1], box[2][0]]))
        scores = self._detect_result.scores
        classes = self._detect_result.labels

        # Keep only predictions whose label exists in the label map, and
        # re-encode each kept label as its index within label_list.
        for box, sc, cls in zip(boxes, scores, classes):
            if cls not in label_list:
                continue
            _boxes.append(box)
            _scores.append(sc)
            _classes.append(label_list.index(cls))

        objects = read_xml_as_eval_info(self._xml_file, label_list)['objects']
        # Split each object row into a class column and the remaining box
        # columns (rows look like [class, xmin, ymin, xmax, ymax] — see the
        # reordering below; confirm against read_xml_as_eval_info).
        gt_classes, gt_boxes = np.hsplit(np.array(objects), [1])
        if len(objects) == 0:
            gt_boxes = []
            gt_classes = []
        else:
            gt_classes = list(gt_classes.flatten())

            # ([xmin, ymin, xmax, ymax]) -> [ymin, xmin, ymax, xmax]
            gt_boxes = gt_boxes[:, (1, 0, 3, 2)]
            gt_boxes = list(map(lambda x: list(x), gt_boxes))

        # Shift both class lists by +1 so that 0 is free to serve as the
        # default ("no match") class for the evaluator.
        gt, pred = eval_detect_result(gt_boxes, list(map(lambda x: x + 1, gt_classes)),
                                      _boxes, list(map(lambda y: y + 1, _classes)), default_class=0)
        return gt, pred


def main():
    """Run detection over every .jpg in the test folder, score each image
    against its .xml annotation, and print the aggregate recall/precision."""
    data_dir = r"D:\workspace\shelf_test_imgs\mifen"
    all_gt = []
    all_pred = []
    for img_path in glob.glob(os.path.join(data_dir, "*.jpg")):
        annotation = img_path[0:-4] + ".xml"
        print(annotation)
        # OpenCV loads BGR; convert to RGB before handing to the server.
        frame = cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB)
        server = ShelfServer([frame], Context(r"D:\workspace\Shelf-product-identification\config.xml"))
        evaluator = EvaluateDetectResult(server.detect_result, annotation)
        gt_after, pred_after = evaluator.evaluate()
        print(
            "gt:\n", gt_after,
            "pred:\n", pred_after)
        all_gt += gt_after
        all_pred += pred_after
        # detected_img = cv2.cvtColor(detected_img, cv2.COLOR_RGB2BGR)
        cv2.imwrite(img_path + "-after.png", server.get_img_after_draw())
    print("after complete: ")
    precision, recall = compute_recall_and_precision(all_gt, all_pred)
    print('Total Recall: ', recall)
    print('Total Precision: ', precision)


# Script entry point: run the full evaluation when executed directly.
if __name__ == '__main__':
    main()
