import collections
import xml.etree.ElementTree as ET
import sys
sys.path.append("E:\\tensorflowtrain\models-master\\research")
import numpy as np
from object_detection.utils import visualization_utils as visual_utils
from sklearn.metrics import average_precision_score

from .basic import add_element
from .basic import get_overlap_area
from .basic import is_overlap


def visualize_boxes_and_labels_on_image_array(image,
                                              boxes,
                                              classes,
                                              scores,
                                              instance_masks=None,
                                              keypoints=None,
                                              use_normalized_coordinates=False,
                                              max_boxes_to_draw=20,
                                              min_score_thresh=.5,
                                              agnostic_mode=False,
                                              line_thickness=4):
    """Draw labeled detection boxes (and optional masks/keypoints) on an image.

    Boxes that share the exact same coordinates are grouped under one key so
    each location is drawn once.  The image is modified in place and also
    returned.

    Args:
      image: uint8 numpy array of shape (img_height, img_width, 3).
      boxes: numpy array of shape [N, 4] (ymin, xmin, ymax, xmax).
      classes: numpy array of shape [N]; 1-based class indices matching the
        label map.
      scores: numpy array of shape [N], or None.  When None every box is
        treated as ground truth and drawn in black with no label text.
      instance_masks: optional numpy array [N, image_height, image_width].
      keypoints: optional numpy array [N, num_keypoints, 2].
      use_normalized_coordinates: whether box coordinates are normalized.
      max_boxes_to_draw: maximum number of boxes to draw; falsy means "all".
      min_score_thresh: minimum score for a box to be drawn.
      agnostic_mode: when True, a single fixed color is used for every class.
      line_thickness: line width of the boxes (default 4).

    Returns:
      The same uint8 numpy array with the overlays drawn.
    """
    # Per-location accumulators keyed by the box coordinate tuple.
    display_strs = collections.defaultdict(list)
    colors = collections.defaultdict(str)
    masks = {}
    kp_groups = collections.defaultdict(list)

    if not max_boxes_to_draw:
        max_boxes_to_draw = boxes.shape[0]
    for idx in range(min(max_boxes_to_draw, boxes.shape[0])):
        # Skip low-confidence detections (ground-truth boxes have no scores).
        if scores is not None and scores[idx] <= min_score_thresh:
            continue
        key = tuple(boxes[idx].tolist())
        if instance_masks is not None:
            masks[key] = instance_masks[idx]
        if keypoints is not None:
            kp_groups[key].extend(keypoints[idx])
        if scores is None:
            colors[key] = 'black'
        elif agnostic_mode:
            # NOTE(review): indexing STANDARD_COLORS with the boolean keeps the
            # original behavior (True -> index 1), i.e. one fixed color.
            colors[key] = visual_utils.STANDARD_COLORS[agnostic_mode]
        else:
            display_strs[key].append('')
            colors[key] = visual_utils.STANDARD_COLORS[
                classes[idx] % len(visual_utils.STANDARD_COLORS)]
        if scores is not None and not agnostic_mode:
            pass  # display string already recorded above
        elif scores is not None and agnostic_mode:
            display_strs[key].append('')

    # Render every grouped detection onto the image (in place).
    for key, color in colors.items():
        ymin, xmin, ymax, xmax = key
        if instance_masks is not None:
            visual_utils.draw_mask_on_image_array(
                image,
                masks[key],
                color=color
            )
        visual_utils.draw_bounding_box_on_image_array(
            image,
            ymin,
            xmin,
            ymax,
            xmax,
            color=color,
            thickness=line_thickness,
            display_str_list=display_strs[key],
            use_normalized_coordinates=use_normalized_coordinates)
        if keypoints is not None:
            visual_utils.draw_keypoints_on_image_array(
                image,
                kp_groups[key],
                color=color,
                radius=line_thickness / 2,
                use_normalized_coordinates=use_normalized_coordinates)

    return image


def get_revolve_info(angle, shape, reverse=False):
    """Build the rotation matrix, counterpart image shape and centre offset
    for rotating an image by ``angle`` degrees.

    Parameters
    ----------
    angle: rotation angle in degrees.
    shape: image shape (height, width); the pre-rotation shape when
        ``reverse`` is False, the post-rotation shape otherwise.
    reverse: True undoes a previous rotation, False applies one.

    Returns
    -------
    (rotation matrix for row-vector points, rotated shape, centre offset)
    """
    arc = np.deg2rad(angle)
    # Bounding-box size of the rotated image, rounded up to whole pixels.
    rotated = (np.ceil(abs(shape[0] * np.cos(arc)) + abs(shape[1] * np.sin(arc))).astype(np.int32),
               np.ceil(abs(shape[0] * np.sin(arc)) + abs(shape[1] * np.cos(arc))).astype(np.int32))
    offset = [(a - b) // 2 for a, b in zip(rotated, shape)]
    if reverse:
        # Undoing: negate the angle and swap which shape is the source.
        arc = -arc
        dst_shape, shape = shape, rotated
    else:
        dst_shape = rotated
    # Row-vector convention: translate to centre, rotate, translate back.
    to_centered = np.matrix([[1, 0, 0],
                             [0, -1, 0],
                             [-0.5 * shape[1], 0.5 * shape[0], 1]])
    rotate = np.matrix([[np.cos(arc), -np.sin(arc), 0],
                        [np.sin(arc), np.cos(arc), 0],
                        [0, 0, 1]])
    from_centered = np.matrix([[1, 0, 0],
                               [0, -1, 0],
                               [0.5 * dst_shape[1], 0.5 * dst_shape[0], 1]])
    return to_centered * rotate * from_centered, rotated, offset


def revolve_boxes(boxes, revolve_mat, src_shape, dst_shape=None):
    """Map normalized boxes through a rotation matrix into the target image.

    Parameters
    ----------
    boxes: numpy array of [ymin, xmin, ymax, xmax] rows, normalized to
        ``src_shape``.
    revolve_mat: 3x3 matrix for row-vector homogeneous points.
    src_shape: (height, width) the boxes are currently normalized to.
    dst_shape: (height, width) of the target image; defaults to ``src_shape``.

    Returns
    -------
    List of axis-aligned boxes normalized to ``dst_shape`` that enclose the
    four rotated corners of each input box.
    """
    if dst_shape is None:
        dst_shape = src_shape
    result = []
    for ymin, xmin, ymax, xmax in boxes.tolist():
        # The four corners of the box in pixel coordinates (homogeneous).
        corners = np.matrix([[xmin * src_shape[1], ymin * src_shape[0], 1],
                             [xmax * src_shape[1], ymin * src_shape[0], 1],
                             [xmin * src_shape[1], ymax * src_shape[0], 1],
                             [xmax * src_shape[1], ymax * src_shape[0], 1]])
        moved = (corners * revolve_mat).tolist()
        xs = [p[0] for p in moved]
        ys = [p[1] for p in moved]
        # Initial bounds mirror the original code: mins start at the image
        # extent and maxes at zero, softly clamping degenerate results.
        lo_x = min(min(xs), dst_shape[1])
        lo_y = min(min(ys), dst_shape[0])
        hi_x = max(max(xs), 0)
        hi_y = max(max(ys), 0)
        result.append([lo_y / dst_shape[0], lo_x / dst_shape[1],
                       hi_y / dst_shape[0], hi_x / dst_shape[1]])
    return result


def merge_region_prediction(boxes, scores, classes, percent):
    """Merge overlapping detections (non-maximum suppression).

    If two boxes overlap and the overlap ratio — overlap area divided by the
    area of the smaller of the two boxes — exceeds ``percent``, the box with
    the lower score is dropped.

    BUG FIX: the original implementation (a) pre-seeded the result with
    ``boxes[0]`` and then could append it a second time in the loop, and
    (b) compared each box only against LATER (lower-scored) boxes and dropped
    the current box on overlap — removing the HIGHER-scored detection,
    contradicting its own docstring.  This version keeps the higher-scored
    box, as documented.

    :param boxes: numpy array [N, 4] of (ymin, xmin, ymax, xmax).
    :param scores: numpy array [N] of confidences.
    :param classes: numpy array [N] of class ids.
    :param percent: overlap-ratio threshold above which a box is suppressed.
    :return: (kept_boxes, kept_classes, kept_scores) as plain lists.
    """
    # Sort by descending score so higher-scored boxes are considered first.
    order = np.argsort(-scores)
    boxes = boxes[order]
    scores = scores[order]
    classes = classes[order]

    kept_boxes = []
    kept_scores = []
    kept_classes = []
    for box, score, cls in zip(boxes, scores, classes):
        suppressed = False
        for kept in kept_boxes:
            if not is_overlap(box, kept):
                continue
            # Ratio is measured against the smaller of the two boxes.
            smaller_area = min((kept[2] - kept[0]) * (kept[3] - kept[1]),
                               (box[2] - box[0]) * (box[3] - box[1]))
            overlap_area = get_overlap_area(kept, box)
            # Near-zero-area boxes are always suppressed, preserving the
            # original guard against division by (almost) zero.
            if (smaller_area < 0.001) or ((overlap_area / smaller_area) > percent):
                suppressed = True
                break
        if not suppressed:
            kept_boxes.append(box)
            kept_scores.append(score)
            kept_classes.append(cls)
    return kept_boxes, kept_classes, kept_scores
"""
函数将gsk牙膏切割所得的图片转换为大图
"""
def convert_gsk_box_to_global(info, boxes, classes, scores, top):
    """Convert boxes detected on a vertical crop back to full-image coordinates.

    The crop spans the full image width, so x stays unchanged; y is scaled to
    crop pixels, shifted by the crop's top edge, then re-normalized to the
    full image height.

    BUG FIX: the original branch for out-of-range boxes printed debug output
    and then executed ``assert xmax >= 1.0`` and ``assert ymax >= 1.0``,
    crashing with AssertionError whenever only ONE of the two coordinates
    exceeded 1.0.  Out-of-range boxes are now simply skipped.

    :param info: dict with 'shape' (full image h, w) and 'crop_shape' (crop h, w).
    :param boxes: iterable of [ymin, xmin, ymax, xmax] normalized to the crop.
    :param classes: per-box class ids.
    :param scores: per-box confidences.
    :param top: y offset (in pixels) of the crop's top edge in the full image.
    :return: (_boxes, _classes, _scores) re-normalized to the full image.
    """
    src_h, src_w = info['shape'][:2]
    h, w = info['crop_shape'][:2]
    _boxes = []
    _scores = []
    _classes = []
    for box, score, cls in zip(boxes, scores, classes):
        xmin = box[1]
        ymin = (box[0] * h + top) / src_h
        xmax = box[3]
        ymax = (box[2] * h + top) / src_h

        # Drop boxes that fall outside the full image.
        if (xmax > 1.0) or (ymax > 1.0):
            continue
        _boxes.append([ymin, xmin, ymax, xmax])
        _scores.append(score)
        _classes.append(cls)
    return _boxes, _classes, _scores


"""
函数功能：将小图的box坐标转换到大图的box坐标
"""

def convert_region_box_to_global(info, boxes, classes, scores, index, reverse=False):
    """Map normalized boxes from a cropped region into the full image.

    ``index`` encodes the crop's pixel position as '<x>_<y>'.  Each box is
    scaled to crop pixels, shifted by the crop offset, then re-normalized to
    the full image.  When ``reverse`` is True the offset is subtracted
    instead, mapping full-image boxes back into the crop.  Boxes whose
    xmax or ymax reach or exceed 1.0 after conversion are discarded.

    :param info: dict with 'shape' (full image h, w) and 'crop_shape' (crop h, w).
    :param boxes: iterable of [ymin, xmin, ymax, xmax] normalized boxes.
    :param classes: per-box class ids.
    :param scores: per-box confidences.
    :param index: crop position string, e.g. '360_520'.
    :param reverse: subtract the offset instead of adding it.
    :return: (_boxes, _classes, _scores) after conversion.
    """
    src_h, src_w = info['shape'][:2]
    crop_h, crop_w = info['crop_shape'][:2]
    parts = index.split('_')
    off_x, off_y = int(parts[0]), int(parts[1])
    sign = -1 if reverse else 1

    converted_boxes = []
    converted_scores = []
    converted_classes = []
    for box, score, cls in zip(boxes, scores, classes):
        ymin = (box[0] * crop_h + sign * off_y) / src_h
        xmin = (box[1] * crop_w + sign * off_x) / src_w
        ymax = (box[2] * crop_h + sign * off_y) / src_h
        xmax = (box[3] * crop_w + sign * off_x) / src_w
        if xmax >= 1.0 or ymax >= 1.0:
            continue
        converted_boxes.append([ymin, xmin, ymax, xmax])
        converted_scores.append(score)
        converted_classes.append(cls)
    return converted_boxes, converted_classes, converted_scores

"""
读取XML文件
"""
def read_xml_as_eval_info(xml_path, label_list):
    """Parse a Pascal-VOC style annotation file into an evaluation dict.

    :param xml_path: path to the annotation XML file.
    :param label_list: class names to keep; objects with other names are skipped.
    :return: dict with
        'shape':   (height, width) of the annotated image,
        'objects': [[class_index, xmin, ymin, xmax, ymax], ...] in pixel
                   coordinates, where class_index is the position of the
                   object's name in ``label_list``.
    """
    root = ET.parse(xml_path).getroot()
    size_node = root.find('size')
    width = int(size_node.find('width').text)
    height = int(size_node.find('height').text)

    objects = []
    for obj in root.iter('object'):
        name = obj.find('name').text
        if name not in label_list:
            continue  # ignore classes we are not evaluating
        bndbox = obj.find('bndbox')
        # Pixel coordinates are kept as-is (not normalized).
        coords = [int(bndbox.find(tag).text)
                  for tag in ('xmin', 'ymin', 'xmax', 'ymax')]
        objects.append([label_list.index(name)] + coords)

    return {'shape': (height, width), 'objects': objects}
"""
评估测试结果：ground_true [0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1.0, 1.0, 1.0, 1.0, 1.0]，predictions [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0]
true_boxes：精标准的框，true_classes：精标准的类别
pred_boxes：预测的框，pred_boxes：预测的类别，
"""


def eval_detect_result(true_boxes, true_classes, pred_boxes, pred_classes,
                       threshold=0.6, default_class=-1):
    """Match predictions to ground truth and build paired label vectors.

    Each prediction is matched to the ground-truth box of the same class with
    the highest overlap ratio (overlap area over the smaller box's area) above
    ``threshold``.  Matched predictions contribute (cls, cls) pairs, unmatched
    ones (default_class, cls); ground-truth boxes no prediction matched
    contribute (true_cls, default_class).

    BUG FIX: the original appended to the matched-index list inside the
    best-match update branch, so every superseded candidate index was also
    recorded as "detected" and those ground-truth boxes were never counted as
    missed.  Only the final best-matching index is recorded now.

    :param true_boxes: ground-truth boxes [ymin, xmin, ymax, xmax].
    :param true_classes: ground-truth class ids, parallel to true_boxes.
    :param pred_boxes: predicted boxes.
    :param pred_classes: predicted class ids, parallel to pred_boxes.
    :param threshold: minimum overlap ratio for a match (default 0.6).
    :param default_class: label used for misses/false positives (default -1).
    :return: (ground_true, predictions) label lists of equal length.
    """
    ground_true = []
    predictions = []
    matched = []
    for box, cls in zip(pred_boxes, pred_classes):
        best_idx = -1
        best_rate = 0.0
        for idx, (t_box, t_cls) in enumerate(zip(true_boxes, true_classes)):
            if not is_overlap(t_box, box):
                continue
            area = get_overlap_area(t_box, box)
            # Ratio is measured against the smaller of the two boxes.
            src_area = min((box[2] - box[0]) * (box[3] - box[1]),
                           (t_box[2] - t_box[0]) * (t_box[3] - t_box[1]))
            rate = area / src_area
            if (rate > threshold) and (cls == t_cls) and (rate > best_rate):
                best_rate = rate
                best_idx = idx
        # Record only the final best match (see BUG FIX note above).
        if best_idx != -1:
            matched.append(best_idx)
        ground_true.append(default_class if best_idx == -1 else cls)
        predictions.append(cls)

    # Ground-truth boxes that no prediction matched count as misses.
    for idx in range(len(true_classes)):
        if idx not in matched:
            ground_true.append(true_classes[idx])
            predictions.append(default_class)

    return ground_true, predictions


