import os
import os.path as osp
import sys
import random
import time
import cv2
import numpy as np
import torch.utils.data as data
from PIL import Image
from xml.dom.minidom import parse

if sys.version_info[0] == 2:
    import xml.etree.cElementTree as ET
else:
    import xml.etree.ElementTree as ET

# 21 entries: index 0 is the background class, followed by the standard
# 20 PASCAL VOC object categories.
VOC_CLASSES = (  # always index 0
    '__background__', 'aeroplane', 'bicycle', 'bird', 'boat',
    'bottle', 'bus', 'car', 'cat', 'chair',
    'cow', 'diningtable', 'dog', 'horse',
    'motorbike', 'person', 'pottedplant',
    'sheep', 'sofa', 'train', 'tvmonitor')

# note: if you used our download scripts, this should be right
# VOC_ROOT = osp.join('/home/toandm2', "data/VOCdevkit/")


class VOCAnnotationTransform(object):
    """Transforms a VOC annotation into a list of bbox coords and label index.

    Initialized with a dictionary lookup of classnames to indexes.

    Arguments:
        class_to_ind (dict, optional): dictionary lookup of classnames -> indexes
            (default: indexes of VOC_CLASSES, background at 0)
        keep_difficult (bool, optional): keep objects marked <difficult> or not
            (default: False)
    """

    def __init__(self, class_to_ind=None, keep_difficult=False):
        # Bug fix: class_to_ind was previously accepted but ignored.
        self.class_to_ind = class_to_ind or dict(
            zip(VOC_CLASSES, range(len(VOC_CLASSES))))
        self.keep_difficult = keep_difficult

    def __call__(self, target, width=None, height=None):
        """Convert one parsed annotation into boxes + labels.

        Arguments:
            target (ET.Element): the root <annotation> element.
            width (int, optional): image width; when given together with
                height, x coordinates are scaled to [0, 1].
            height (int, optional): image height; when given together with
                width, y coordinates are scaled to [0, 1].
        Returns:
            list of [xmin, ymin, xmax, ymax, label_ind], one per object.
        """
        res = []
        for obj in target.iter('object'):
            difficult = int(obj.find('difficult').text) == 1
            if not self.keep_difficult and difficult:
                continue
            # NOTE: names are used as-is (not lower-cased); standard VOC
            # class names are already lower case.
            name = obj.find('name').text.strip()
            bbox = obj.find('bndbox')

            pts = ['xmin', 'ymin', 'xmax', 'ymax']
            bndbox = []
            for i, pt in enumerate(pts):
                # VOC pixel coordinates are 1-based; shift to 0-based.
                cur_pt = float(bbox.find(pt).text) - 1
                if width is not None and height is not None:
                    # Optionally normalize to relative coordinates.
                    cur_pt = cur_pt / width if i % 2 == 0 else cur_pt / height
                bndbox.append(cur_pt)
            label_idx = self.class_to_ind[name]
            bndbox.append(label_idx)
            res += [bndbox]  # [xmin, ymin, xmax, ymax, label_ind]
        return res  # [[xmin, ymin, xmax, ymax, label_ind], ... ]


class VOCDataset(data.Dataset):
    """VOC Detection Dataset Object.

    Input is an image, target is the annotation. Expects the layout
    <root_dir>/<split>/Annotations/*.xml and <root_dir>/<split>/JPEGImages/*.jpg;
    only images whose annotation file contains at least one <object> are indexed.

    Arguments:
        root_dir (string): filepath to the dataset root folder.
        split (string): sub-folder / image set to use (eg. 'train', 'val', 'test').
        use_mosaic (bool): when True, __getitem__ combines 4 images into one
            mosaic sample (default: False).
        transform (callable, optional): transformation to perform on the
            {'image', 'annot'} sample dict.
        target_transform (callable, optional): transformation to perform on the
            parsed XML `annotation` (default keeps <difficult> objects).
    """

    def __init__(self, root_dir, split, use_mosaic=False,
                 transform=None,
                 # NOTE: single shared default instance; it is stateless, so
                 # sharing it across datasets is safe.
                 target_transform=VOCAnnotationTransform(keep_difficult=True)):
        self.root = root_dir
        self.split = split
        self.transform = transform
        self.target_transform = target_transform
        self.use_mosaic = use_mosaic
        # The two '%s' placeholders are filled with a (root, image_id) tuple.
        self._annopath = osp.join('%s', self.split, 'Annotations', '%s.xml')
        self._imgpath = osp.join('%s', self.split, 'JPEGImages', '%s.jpg')
        self.ids = list()
        anno_dir = os.path.join(self.root, self.split, 'Annotations')
        for filename in os.listdir(anno_dir):
            # Skip annotation files that contain no <object> element.
            xml_root = ET.parse(os.path.join(anno_dir, filename)).getroot()
            if xml_root.find('object') is not None:
                self.ids.append((self.root, os.path.splitext(filename)[0]))

    def _load_image(self, img_id):
        # convert('RGB') guards against grayscale/palette JPEGs, which would
        # otherwise break the (H, W, 3) mosaic paste in __getitem__.
        return Image.open(self._imgpath % img_id).convert('RGB')

    def _load_annot(self, img_id):
        # Parse one XML annotation into an (N, 5) array of
        # [xmin, ymin, xmax, ymax, label]; (0, 5) when filtering (e.g.
        # keep_difficult=False) removes every object.
        target = ET.parse(self._annopath % img_id).getroot()
        if self.target_transform is not None:
            target = self.target_transform(target)
        annot = np.array(target)
        if annot.size == 0:
            annot = np.zeros((0, 5))
        return annot

    def __getitem__(self, idx):
        """Return {'image': ..., 'annot': (N, 5) ndarray} for sample idx."""
        if self.use_mosaic:
            imgs, annots = [], []
            img_id = self.ids[idx]
            imgs.append(self._load_image(img_id))
            annots.append(self._load_annot(img_id))

            # Pick 3 further distinct indices. NOTE(review): this loop never
            # terminates when len(self.ids) < 4 — mosaic needs 4 images.
            index_list, index = [idx], idx
            for _ in range(3):
                while index in index_list:
                    index = np.random.randint(0, len(self.ids))
                index_list.append(index)
                image_id = self.ids[index]
                imgs.append(self._load_image(image_id))
                annots.append(self._load_annot(image_id))

            # Images 1..4 are laid out clockwise with image 1 at the top left.
            # scale1 fixes the split point; the other three images are resized
            # to fit the remaining quadrants. scale1 is drawn from [0.2, 0.8)
            # so no quadrant becomes too small for the model to learn from.
            scale1 = np.random.uniform(0.2, 0.8)
            width1, height1 = imgs[0].size
            # Image.LANCZOS is the supported name for the deprecated/removed
            # Image.ANTIALIAS alias (Pillow >= 10); same resampling filter.
            imgs[0] = imgs[0].resize(
                (int(width1 * scale1), int(height1 * scale1)), Image.LANCZOS)

            # Top-right quadrant: fit by height, clamp by remaining width.
            max_height2, max_width2 = int(
                height1 * scale1), width1 - int(width1 * scale1)
            width2, height2 = imgs[1].size
            scale2 = max_height2 / height2
            if int(scale2 * width2) > max_width2:
                scale2 = max_width2 / width2
            imgs[1] = imgs[1].resize(
                (int(width2 * scale2), int(height2 * scale2)), Image.LANCZOS)

            # Bottom-right quadrant.
            max_height3, max_width3 = height1 - int(
                height1 * scale1), width1 - int(width1 * scale1)
            width3, height3 = imgs[2].size
            scale3 = max_height3 / height3
            if int(scale3 * width3) > max_width3:
                scale3 = max_width3 / width3
            imgs[2] = imgs[2].resize(
                (int(width3 * scale3), int(height3 * scale3)), Image.LANCZOS)

            # Bottom-left quadrant.
            max_height4, max_width4 = height1 - int(height1 * scale1), int(
                width1 * scale1)
            width4, height4 = imgs[3].size
            scale4 = max_height4 / height4
            if int(scale4 * width4) > max_width4:
                scale4 = max_width4 / width4
            imgs[3] = imgs[3].resize(
                (int(width4 * scale4), int(height4 * scale4)), Image.LANCZOS)

            # The mosaic canvas has the same size as the first (full) image.
            final_image = np.zeros((height1, width1, 3))
            final_image[0:int(height1 * scale1),
                        0:int(width1 * scale1)] = imgs[0]
            final_image[0:int(height2 * scale2),
                        int(width1 * scale1):(int(width1 * scale1) +
                                              int(width2 * scale2))] = imgs[1]
            final_image[int(height1 * scale1):(int(height1 * scale1) +
                                               int(height3 * scale3)),
                        int(width1 * scale1):(int(width1 * scale1) +
                                              int(width3 * scale3))] = imgs[2]
            final_image[int(height1 * scale1):(int(height1 * scale1) +
                                               int(height4 * scale4)),
                        0:int(width4 * scale4)] = imgs[3]

            # Scale every box with its image, then shift it into the
            # quadrant the image was pasted into.
            annots[0][:, :4] *= scale1
            annots[1][:, :4] *= scale2
            annots[2][:, :4] *= scale3
            annots[3][:, :4] *= scale4

            annots[1][:, [0, 2]] += int(width1 * scale1)
            annots[2][:, [0, 2]] += int(width1 * scale1)
            annots[2][:, [1, 3]] += int(height1 * scale1)
            annots[3][:, [1, 3]] += int(height1 * scale1)

            final_annot = np.concatenate(annots, axis=0)
            sample = {'image': final_image, 'annot': final_annot}
        else:
            img_id = self.ids[idx]
            sample = {'image': self._load_image(img_id),
                      'annot': self._load_annot(img_id)}
        if self.transform is not None:
            sample = self.transform(sample)
        return sample

    def __len__(self):
        return len(self.ids)

    def num_classes(self):
        # Includes the background class at index 0.
        return len(VOC_CLASSES)

    def label_to_name(self, label):
        return VOC_CLASSES[label]

    def load_annotations(self, index):
        """Return the unscaled (N, 5) annotation array for sample `index`.

        Bug fix: the old `self.target_transform(anno, 1, 1)` call raised a
        TypeError because the transform's __call__ took a single argument.
        """
        return self._load_annot(self.ids[index])


if __name__ == "__main__":
    # Smoke test; these imports are project-local and only needed here.
    from dataset.augment import (Normalizer, Augmenter, Resizer,
                                 SmallObjectAugmentation)
    import torchvision.transforms as transforms

    dataset = VOCDataset(root_dir=r'/Users/guojianpeng/desktop/VOC0712',
                         split='test',
                         # Bug fix: the required use_mosaic argument was
                         # missing, which raised a TypeError.
                         use_mosaic=False,
                         transform=transforms.Compose(
                             [Normalizer(),
                              Augmenter(),
                              # Bug fix: SmallObjectAugmentation was passed
                              # as a class; Compose expects an instance like
                              # the other transforms.
                              SmallObjectAugmentation(),
                              Resizer(common_size=512)]))
    print('loaded %d samples' % len(dataset))