import random
import xml.etree.ElementTree as ET
import numpy as np
import cv2
import math
from utils import image_preporcess, image_resize
from tqdm import tqdm


class Dataset(object):
    """Iterable YOLOv3-style dataset.

    Each iteration yields a batch of resized images together with the
    ground-truth label tensors for the three detection scales
    (strides 8 / 16 / 32).
    """

    def __init__(self, dataset_type):
        """
        Initialise dataset-related configuration.

        :param dataset_type: 'train' reads data/train.txt, anything else data/test.txt
        """
        self.annotations_file_path = "data/train.txt" if dataset_type == 'train' else "data/test.txt"
        self.label_file_path = "data/label_file.txt"
        # Whether data augmentation is enabled (not applied anywhere yet).
        self.data_aug = False
        self.input_size = np.array([416, 416])
        self.classes_num = 2
        self.batch_size = 3
        self.strides = np.array([8, 16, 32])
        self.label_name_to_idx = self.label_to_index()
        # Integer grid size of each output feature map (input size // stride);
        # floor division keeps them ints (Python 3 `/` would give floats).
        self.feature_map_size = np.array([int(self.input_size[0] // s) for s in self.strides])
        self.annotations_infos = self.get_annotaions_info(self.annotations_file_path)
        # Anchor (w, h) priors, three per output scale (standard YOLOv3 set).
        self.anchors = np.array([[[10, 13],
                                  [16, 30],
                                  [33, 23]],
                                 [[30, 61],
                                  [62, 45],
                                  [59, 119]],
                                 [[116, 90],
                                  [156, 198],
                                  [373, 326]]])
        self.batch_count = 0
        self.batch_numes = math.ceil(len(self.annotations_infos) / self.batch_size)

    def get_annotaions_info(self, file_path):
        """
        Read every sample line of the dataset index file and shuffle them.

        :param file_path: path of the dataset index file
        :return: shuffled list of stripped lines
        """
        with open(file_path, 'r') as f:
            anno_lists = [line.strip() for line in f.readlines()]
        random.shuffle(anno_lists)
        return anno_lists

    def __iter__(self):
        """The dataset is its own iterator; batches come from __next__."""
        return self

    def label_to_index(self):
        """
        Build the mapping from class name to label index.

        :return: dict mapping class name -> integer index (file line order)
        """
        with open(self.label_file_path, 'r') as f:
            label_list = f.readlines()
        return {label.strip(): idx for idx, label in enumerate(label_list)}

    def parse_annotation_file(self, data_line, debug=False):
        """
        Load one sample's image and its VOC-XML boxes, resized to input_size.

        :param data_line: one dataset line, formatted "/xx/xx.jpg /xx/xx.xml"
        :param debug: when True, display the preprocessed image with its boxes
        :return: (img, gt_boxes) where gt_boxes rows are
                 [xmin, ymin, xmax, ymax, class_idx]
        """
        img_file_path = data_line.strip().split(' ')[0]
        xml_file_path = data_line.strip().split(' ')[1]
        root = ET.parse(xml_file_path).getroot()
        annotation_dict = {
            'img_width': int(root.find('size').find('width').text),
            'img_height': int(root.find('size').find('height').text),
        }
        box_list = []
        for node in root.findall("object"):
            label_idx = self.label_name_to_idx.get(node.find('name').text)
            bndbox = node.find('bndbox')
            box_list.append([int(bndbox.find('xmin').text),
                             int(bndbox.find('ymin').text),
                             int(bndbox.find('xmax').text),
                             int(bndbox.find('ymax').text),
                             label_idx])
        annotation_dict['boxes'] = np.array(box_list)

        img = cv2.imread(img_file_path)
        img_ok, bbox_ok = image_resize(img, self.input_size, annotation_dict['boxes'])
        if debug:
            for box in bbox_ok:
                cv2.rectangle(img_ok, tuple(box[:2]), tuple(box[2:4]), (255, 0, 0), 2)
            cv2.imshow("img", img_ok)
            cv2.waitKey(0)

        return img_ok, bbox_ok

    def _iou(self, box1, box2):
        """
        IoU between one ground-truth box and several anchor boxes, all given
        in (center_x, center_y, w, h) form.

        :param box1: array-like, shape (4,)
        :param box2: array-like, shape (n, 4)
        :return: list of n IoU values
        """
        gt_box_xywh = np.array(box1)
        anchor_box_xywh = np.array(box2)
        gt_box_area = np.maximum(gt_box_xywh[2] * gt_box_xywh[3], 0)
        # Convert both to corner form: xmin, ymin, xmax, ymax.
        gt_box = np.concatenate((gt_box_xywh[:2] - gt_box_xywh[2:4] * 0.5,
                                 gt_box_xywh[:2] + gt_box_xywh[2:4] * 0.5))
        anchor_box = np.concatenate((anchor_box_xywh[:, :2] - anchor_box_xywh[:, 2:4] * 0.5,
                                     anchor_box_xywh[:, :2] + anchor_box_xywh[:, 2:4] * 0.5), axis=1)
        ious_l = []
        for anchor in anchor_box:
            anchor_area = np.maximum((anchor[2] - anchor[0]) * (anchor[3] - anchor[1]), 0)
            xy_min = np.maximum(anchor[:2], gt_box[:2])
            xy_max = np.minimum(anchor[2:], gt_box[2:])
            # Clamp each side length separately: clamping only the product
            # would let two negative overlaps multiply into a spurious
            # positive intersection area.
            inter_w = np.maximum(xy_max[0] - xy_min[0], 0)
            inter_h = np.maximum(xy_max[1] - xy_min[1], 0)
            over_area = inter_w * inter_h
            # Tiny epsilon guards against 0/0 for degenerate boxes.
            ious_l.append(over_area / (gt_box_area + anchor_area - over_area + 1e-10))

        return ious_l

    def get_one_img_gt_label(self, anno_bboxes):
        """
        Build the ground-truth label tensors of one image for the three
        output scales.

        Per output layer, the single anchor with the highest IoU against the
        ground-truth box is marked positive, so every gt box is matched at
        each scale.  (Matching only the globally-best anchor would leave the
        positives/negatives extremely unbalanced and cause missed detections;
        cf. SSD's hard negative mining which keeps the ratio near 1:3.)

        :param anno_bboxes: iterable of boxes [xmin, ymin, xmax, ymax, class_idx]
        :return: list of 3 arrays, each (grid, grid, 3, 5 + classes_num),
                 laid out as [cx, cy, w, h, objectness, one-hot classes]
        """
        layer_gt_label = [np.zeros(
            shape=(self.feature_map_size[layer_idx], self.feature_map_size[layer_idx], 3, 5 + self.classes_num))
            for layer_idx in range(3)]
        for bbox in anno_bboxes:
            onehot = np.zeros(self.classes_num, dtype=np.float32)
            onehot[int(bbox[4])] = 1.0
            # [center_x, center_y, w, h] in input-image pixels; center is the
            # midpoint of the two corners.
            bbox_xywh = np.concatenate(((bbox[:2] + bbox[2:4]) * 0.5, bbox[2:4] - bbox[:2]), axis=0)
            # The same box expressed in grid units of every output layer.
            bbox_xywh_scaled = bbox_xywh[np.newaxis, :] / self.strides[:, np.newaxis]
            for layer_idx in range(3):
                # math.modf returns (fractional, integer) parts; the integer
                # part is the responsible grid cell.
                _, center_x = math.modf(bbox_xywh_scaled[layer_idx][0])
                _, center_y = math.modf(bbox_xywh_scaled[layer_idx][1])
                # Candidate anchors centred on the responsible cell.
                # NOTE(review): anchor w/h are in input pixels while the gt
                # here is in grid units — confirm whether anchors should be
                # divided by the stride before matching.
                anchor_wh = np.array([self.anchors[layer_idx][idx] for idx in range(3)])
                anchor_xy = np.tile(np.array([center_x, center_y]), 3).reshape(3, 2) + np.array([[0.5, 0.5]])
                layer_anchor_xywh = np.concatenate((anchor_xy, anchor_wh), axis=1)
                iou = self._iou(bbox_xywh_scaled[layer_idx], layer_anchor_xywh)
                best_anchor_id = int(np.argmax(iou))
                # Grid indices must be ints; clear the slot first to avoid
                # stale values from a previous box in the same cell.
                cy, cx = int(center_y), int(center_x)
                layer_gt_label[layer_idx][cy, cx, best_anchor_id, :] = 0.0
                # Store the un-normalised (input-pixel) box coordinates.
                layer_gt_label[layer_idx][cy, cx, best_anchor_id, 0:4] = bbox_xywh
                # Objectness flag lives at index 4; one-hot classes follow.
                layer_gt_label[layer_idx][cy, cx, best_anchor_id, 4] = 1.0
                layer_gt_label[layer_idx][cy, cx, best_anchor_id, 5:] = onehot
        return layer_gt_label

    def __next__(self):
        """
        Produce the next batch.

        :return: (batch_image, large-grid, medium-grid, small-grid gt labels)
        :raises StopIteration: after the last batch of an epoch; the
                annotation list is reshuffled for the next epoch.
        """
        if self.batch_count >= self.batch_numes:
            self.batch_count = 0
            random.shuffle(self.annotations_infos)
            raise StopIteration

        batch_image = np.zeros(shape=(self.batch_size, self.input_size[0], self.input_size[1], 3))
        batch_gt_labels = [np.zeros(shape=(self.batch_size, size, size, 3, 5 + self.classes_num))
                           for size in self.feature_map_size]
        for num in range(self.batch_size):
            # Wrap around when the final batch overruns the dataset.
            index = (self.batch_count * self.batch_size + num) % len(self.annotations_infos)
            img, anno_bboxes = self.parse_annotation_file(self.annotations_infos[index])
            one_img_labels = self.get_one_img_gt_label(anno_bboxes)
            batch_image[num] = img
            for layer_idx in range(3):
                batch_gt_labels[layer_idx][num] = one_img_labels[layer_idx]
        self.batch_count += 1

        return batch_image, batch_gt_labels[0], batch_gt_labels[1], batch_gt_labels[2]

    def __len__(self):
        """Number of batches per epoch."""
        return self.batch_numes


if __name__ == '__main__':
    # Smoke test: iterate one epoch and report the tensor shapes per batch.
    train_data = Dataset('train')
    progress = tqdm(train_data)
    for batch in progress:
        progress.set_description('Processing:')
        for tensor in batch:
            print(tensor.shape)
