# -*- coding: utf-8 -*-

import os
import xml.etree.ElementTree as ET
import PIL.Image as Image
import numpy as np
import torch
import torch.utils.data as torch_data
import torchvision.transforms as tv_transforms

import augmentation.obj_detect_aug as aug


class AnnotationTransform(object):
    """Parse a PASCAL VOC XML annotation file into darknet-format targets.

    Each annotated object becomes ``[class_id, c_x, c_y, w, h]`` where the
    box centre and size are normalized by the image width/height
    (darknet convention).
    """

    def __init__(self, keep_difficult=False):
        """
        Parameters
        ----------
        keep_difficult : bool
            If True, objects flagged ``difficult`` in the annotation are
            kept; otherwise they are skipped (default).
        """
        # index in this list == VOC class id
        self.classes = [
            "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car",
            "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike",
            "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"
        ]
        self.keep_difficult = keep_difficult

    def read_annotation_xml_file(self, xml_file_path):
        """Read VOC annotation file which is xml format

        Parameters
        ----------
        xml_file_path : string
            annotation file system path

        Returns
        -------
        list of list of 5 elements
            A list of annotations, each annotation format: [class_id, c_x, c_y, w, h]
                c_x, c_y, w, h is darknet format (normalized by image size)
        """
        annotations = list()
        root = ET.parse(xml_file_path).getroot()
        # step 1: get image width and height (used to normalize the boxes)
        size_node = root.find('size')
        width = int(size_node.find('width').text)
        height = int(size_node.find('height').text)
        # step 2: iterate all annotated objects
        for obj in root.iter('object'):
            difficult = int(obj.find('difficult').text) == 1
            if difficult and not self.keep_difficult:
                continue
            name = obj.find('name').text.lower().strip()
            bounding_box_node = obj.find('bndbox')
            # minus 1 for 0-based pixel coordinates; float() first because
            # some VOC annotation files store coordinates as e.g. "156.0"
            x_min = int(float(bounding_box_node.find('xmin').text)) - 1
            y_min = int(float(bounding_box_node.find('ymin').text)) - 1
            x_max = int(float(bounding_box_node.find('xmax').text)) - 1
            y_max = int(float(bounding_box_node.find('ymax').text)) - 1
            # step 3: convert corner box to darknet centre/size format
            annotations.append([
                self.classes.index(name),
                (x_min + x_max) / 2 / width,
                (y_min + y_max) / 2 / height,
                (x_max - x_min + 1) / width,
                (y_max - y_min + 1) / height,
            ])
        return annotations

    def __call__(self, xml_file_path):
        return self.read_annotation_xml_file(xml_file_path)


class VOCDataset(torch_data.Dataset):
    """
    VOC2007 and VOC2012 Dataset.
    organize like this:
        root_folder
            VOC2007
                Annotations
                JPEGImages
            VOC2012
                Annotations
                JPEGImages

    NOTE: only the 'VOC_mini' split is currently enabled; see
    ``_get_image_and_annotation_file_path_pairs`` to enable the full sets.
    """
    def __init__(self, root_folder):
        """
        Parameters
        ----------
        root_folder : string
            root folder system path
        """
        super(VOCDataset, self).__init__()
        self.image_transform = tv_transforms.Compose([
            tv_transforms.ToTensor()
        ])
        self.annotation_transform = AnnotationTransform(keep_difficult=False)
        self.image_and_annotation_file_path_pairs = \
            self._get_image_and_annotation_file_path_pairs(root_folder)

    @staticmethod
    def _collect_split_pairs(split_folder):
        """Return (image_paths, annotation_paths) for one VOC split folder.

        ``split_folder`` must contain JPEGImages/ and Annotations/
        sub-folders; each annotation file shares its image's basename
        with an .xml extension.
        """
        image_folder = os.path.join(split_folder, 'JPEGImages')
        annotation_folder = os.path.join(split_folder, 'Annotations')
        # sorted() makes dataset order deterministic across runs/platforms;
        # endswith() avoids matching stray files that merely contain 'jpg'
        image_names = sorted(
            name for name in os.listdir(image_folder) if name.endswith('.jpg'))
        image_paths = [os.path.join(image_folder, name) for name in image_names]
        annotation_paths = [
            os.path.join(annotation_folder, os.path.splitext(name)[0] + '.xml')
            for name in image_names]
        return image_paths, annotation_paths

    @staticmethod
    def _get_image_and_annotation_file_path_pairs(root_folder):
        """Collect (image_path, annotation_path) pairs for every enabled split.

        To train on the full datasets, add 'VOC2007' / 'VOC2012' to the
        split tuple below.
        """
        image_file_paths, annotation_file_paths = list(), list()
        for split_name in ('VOC_mini',):
            split_folder = os.path.join(root_folder, split_name)
            images, annotations = VOCDataset._collect_split_pairs(split_folder)
            image_file_paths.extend(images)
            annotation_file_paths.extend(annotations)
        return list(zip(image_file_paths, annotation_file_paths))

    @staticmethod
    def _data_aug(image, annotations):
        """Apply training-time augmentation to one (image, annotations) pair.

        image : numpy array, H x W x C — TODO confirm layout against aug module
        annotations : list of [class_id, c_x, c_y, w, h] (darknet format)
        """
        # augmentation 1: rescale image such that its shorter side is 600
        # pixels, while capping the longer side at 1000 pixels
        target_size, max_size = 600, 1000
        min_dim = min(image.shape[1], image.shape[0])
        max_dim = max(image.shape[1], image.shape[0])
        scale = target_size / min_dim
        if scale * max_dim > max_size:  # at most 1000 pixels
            scale = max_size / max_dim
        image, annotations = aug.resize(
            image, annotations,
            dst_size=(int(image.shape[0] * scale), int(image.shape[1] * scale)))

        # augmentation 2: horizontal random flip
        image, annotations = aug.random_horizontal_flip(image, annotations)
        return image, annotations

    def __len__(self):
        return len(self.image_and_annotation_file_path_pairs)

    def __getitem__(self, index):
        """Return (image_tensor, annotation_tensor) for the given index.

        image_tensor : C x H x W float tensor in [0, 1] (from ToTensor)
        annotation_tensor : (num_objects, 5) float32, darknet format
        """
        image_file_path, annotation_file_path = \
            self.image_and_annotation_file_path_pairs[index]
        image = np.array(Image.open(image_file_path).convert('RGB'))
        annotations = self.annotation_transform(annotation_file_path)
        image, annotations = self._data_aug(image, annotations)
        # reshape keeps a consistent (N, 5) shape even when no objects remain
        annotations = np.array(annotations, dtype=np.float32).reshape(-1, 5)
        # .copy() because flipping can leave the array with negative strides,
        # which torch tensors cannot wrap
        image = self.image_transform(image.copy())
        annotations = torch.from_numpy(annotations)
        return image, annotations
