"""
implementation for pascal voc dataset format
"""

import os
import cv2
import numpy as np
from PIL import Image
import torch
import torch.utils.data as data
import xml.etree.ElementTree as ET

from vortex.data.augment import transform as ssd_transform
from vortex.data.labels import VOC_LABELS


class PascalVOCDataset(data.Dataset):
    """Pascal VOC detection dataset (JPEG images + XML bounding-box annotations).

    train and val dataset are mixed together.
    """

    def __init__(self, data_root, split, transform=None, labels=None, bg0=True):
        """
        Args:
            data_root: path to a VOC year directory containing
                ``ImageSets/Main``, ``JPEGImages`` and ``Annotations``.
            split: image-set name, e.g. ``'train'``, ``'val'``, ``'trainval'``.
            transform: optional callable ``(image, boxes, labels) ->
                (image, boxes, labels)``. When ``None``, the project's SSD
                transform is applied instead.
            labels: ordered sequence of class names; falls back to
                ``VOC_LABELS`` when ``None`` or empty.
            bg0: when True, class id 0 is reserved for ``'background'`` and
                real classes start at 1; otherwise ids start at 0.
        """
        # Resolve per-sample image/annotation paths from the image-set index.
        text_path = os.path.join(data_root, 'ImageSets', 'Main', '{}.txt'.format(split))
        with open(text_path, 'r') as f:
            indices = [line.strip() for line in f]
        self.image_paths = [os.path.join(data_root, 'JPEGImages', '{}.jpg'.format(index)) for index in indices]
        self.annot_paths = [os.path.join(data_root, 'Annotations', '{}.xml'.format(index)) for index in indices]

        # Build the class-name -> integer-id map.
        self.label_map = {}
        if labels is None or len(labels) == 0:
            labels = VOC_LABELS
        for idx, class_name in enumerate(labels):
            class_id = idx + 1 if bg0 else idx
            self.label_map[class_name] = class_id
        if bg0:
            self.label_map['background'] = 0

        self.split = split
        self.transform = transform

    def __getitem__(self, index):
        """Return ``(image, boxes, labels, image_path)`` for one sample.

        ``boxes`` is an (N, 4) float tensor of ``[xmin, ymin, xmax, ymax]``;
        ``labels`` is an (N,) long tensor of class ids.
        """
        image_path = self.image_paths[index]
        annot_path = self.annot_paths[index]
        # Use a context manager so the underlying file handle is closed
        # promptly instead of waiting for garbage collection.
        with Image.open(image_path, mode='r') as img:
            image = img.convert('RGB')

        boxes, labels = self.load_annotation(annot_path)
        boxes = torch.FloatTensor(boxes)
        labels = torch.LongTensor(labels)

        if self.transform is None:
            image, boxes, labels = ssd_transform(image, boxes, labels, self.split)
        else:
            # Custom transforms receive boxes in relative (percent) coords.
            width = image.size[0]
            height = image.size[1]
            boxes[:, [0, 2]] /= width
            boxes[:, [1, 3]] /= height

            image, boxes, labels = self.transform(image, boxes, labels)

        return image, boxes, labels, image_path

    def __len__(self):
        """Number of samples in this split."""
        return len(self.image_paths)

    def collate_fn(self, batch):
        """Collate variable-object-count detection samples into a batch.

        Images are stacked into a single tensor; boxes, labels and paths are
        returned as lists because each image may contain a different number
        of objects.
        """
        images, boxes, labels, paths = zip(*batch)
        images = torch.stack(list(images), dim=0)

        return images, list(boxes), list(labels), list(paths)

    def load_annotation(self, annotation_path):
        """Parse one VOC XML annotation file.

        Returns:
            (boxes, labels): ``boxes`` is an (N, 4) int array of 0-based
            ``[xmin, ymin, xmax, ymax]``; ``labels`` is an (N,) int array
            of class ids from ``self.label_map``.
        """
        tree = ET.parse(annotation_path)
        root = tree.getroot()
        boxes = []
        labels = []
        for obj in root.iter('object'):
            class_name = obj.find('name').text.lower().strip()

            bbox = obj.find('bndbox')
            # VOC coordinates are 1-based; shift to 0-based pixel indices.
            xmin = int(bbox.find('xmin').text) - 1
            ymin = int(bbox.find('ymin').text) - 1
            xmax = int(bbox.find('xmax').text) - 1
            ymax = int(bbox.find('ymax').text) - 1

            class_id = self.label_map[class_name]
            boxes.append([xmin, ymin, xmax, ymax])
            labels.append(class_id)
        if not boxes:
            # An image with no objects must still yield an (N, 4) array so
            # downstream slicing (boxes[:, [0, 2]] in __getitem__) works.
            return np.zeros((0, 4), dtype=np.int64), np.zeros((0,), dtype=np.int64)
        return np.array(boxes), np.array(labels)

if __name__ == '__main__':
    # Smoke test: build a dataset from a local VOC2007 root directory.
    voc_root = r'F:\Data\VOCdevkit\VOC2007'
    dataset = PascalVOCDataset(voc_root, 'trainval')