""" COCO-style detection dataset (quick and dirty).

Indexes images and XML box annotations through csv files; despite the name,
no COCO json is involved. Adapted from dataset code by Ross Wightman.
"""
from path import *
from config import Config as cfg
from xml_reader import BoxHandler

import torch.utils.data as data

import os
from xml.sax import make_parser
from random import shuffle
import pandas as pd
from PIL import Image


def split_train_val(csv_path='', train_ratio=0.9):
    """Randomly split a train csv index into ``split_train.csv`` / ``split_val.csv``.

    The two split files are written next to ``csv_path`` with the original
    columns; rows are shuffled (module-level ``random`` RNG) before splitting.

    Args:
        csv_path (string): path to the source csv; its basename must contain
            'train' (guards against accidentally splitting a val/test index).
        train_ratio (float): fraction of rows assigned to the train split,
            strictly between 0 and 1.

    Raises:
        ValueError: if ``train_ratio`` is out of range or ``csv_path`` does
            not look like a train index.
    """
    # Raise instead of assert: asserts are stripped under `python -O`.
    if not 0 < train_ratio < 1:
        raise ValueError(f'train_ratio must be in (0, 1), got {train_ratio}')
    if 'train' not in os.path.basename(csv_path):
        raise ValueError(f'expected a train csv index, got {csv_path}')

    data_path = os.path.dirname(csv_path)
    csv_file = pd.read_csv(csv_path)
    idx_list = list(range(csv_file.shape[0]))
    shuffle(idx_list)

    n_train = int(len(idx_list) * train_ratio)
    train_df = csv_file.iloc[idx_list[:n_train]]
    val_df = csv_file.iloc[idx_list[n_train:]]
    # index=False: don't emit the pandas row index as an extra column
    # (the original passed index=None, relying on pandas treating it as falsy).
    train_df.to_csv(os.path.join(data_path, 'split_train.csv'), index=False)
    val_df.to_csv(os.path.join(data_path, 'split_val.csv'), index=False)


class CocoDetection(data.Dataset):
    """Detection dataset indexed by csv files pairing images with XML annotations.

    Despite the name (kept for caller compatibility), annotations come from
    per-image XML files parsed by ``BoxHandler``, not from a COCO json file.
    All images are loaded eagerly into memory at construction time.

    Args:
        split (string): one of 'train', 'val', 'test'.
        data_path (string): root directory containing the csv index files,
            the image files and the xml annotation files.
        transform (callable, optional): a function that takes
            ``(PIL.Image, ann_dict)`` and returns transformed versions.
    """

    def __init__(self, split='', data_path=DATA_PATH, transform=None):
        super(CocoDetection, self).__init__()

        # Raise instead of assert: asserts are stripped under `python -O`.
        if split not in ('train', 'val', 'test'):
            raise ValueError(f"split must be 'train', 'val' or 'test', got {split!r}")
        self.split = split
        self.data_path = data_path
        self.transform = transform
        self.yxyx = True  # box order expected by TF models; most PyTorch models use xyxy
        self.cfg = cfg

        self.csv_path = self._get_csv_path()
        # relative entries like 'image/2518.jpg', 'xml/369.xml', joined onto data_path
        self.img_paths, self.xml_paths = self._load_img_xml_pair()
        self.imgs, self.img_sizes, self.img_ids = self._load_images()
        self.boxes = self._load_boxes()
        self.len = len(self.img_ids)

    def _get_csv_path(self):
        """Return the csv index path for this split.

        For 'train'/'val', creates 'split_train.csv'/'split_val.csv' from
        'train.csv' on first use; 'test' maps to 'validation.csv'.
        """
        if self.split == 'test':
            return os.path.join(self.data_path, 'validation.csv')
        # 'train' or 'val': derive the split files once, then reuse them
        if f'split_{self.split}.csv' not in os.listdir(self.data_path):
            split_train_val(csv_path=os.path.join(self.data_path, 'train.csv'),
                            train_ratio=self.cfg.train_ratio)
        return os.path.join(self.data_path, f'split_{self.split}.csv')

    def _load_img_xml_pair(self):
        """Read the csv index and resolve image/annotation paths.

        return:
            img_paths: [n_images] absolute image paths.
            xml_paths: [n_images] absolute xml paths.
        """
        # column 0: relative image path, column 1: relative xml path
        csv_file = pd.read_csv(self.csv_path).values
        img_paths = [os.path.join(self.data_path, rel) for rel in csv_file[:, 0]]
        xml_paths = [os.path.join(self.data_path, rel) for rel in csv_file[:, 1]]

        return img_paths, xml_paths

    def _load_images(self):
        """Eagerly load every image as RGB.

        return:
            imgs: [n_images] PIL images.
            img_sizes: [n_images] (width, height) tuples.
            img_ids: [n_images] file basenames without extension.
        """
        imgs = []
        img_sizes = []
        img_ids = []

        for img_path in self.img_paths:
            # splitext keeps the full stem even for dotted filenames
            # (the old split('.')[0] truncated e.g. 'a.b.jpg' to 'a').
            img_id = os.path.splitext(os.path.basename(img_path))[0]
            img = Image.open(img_path).convert('RGB')
            img_ids.append(img_id)
            imgs.append(img)
            img_sizes.append(img.size)

        return imgs, img_sizes, img_ids

    def _load_boxes(self):
        """Parse every xml annotation file into a list of boxes.

        return:
            boxes: [n_images, [n_box, (ym, xm, yM, xM)]] when self.yxyx,
                else [n_images, [n_box, (xm, ym, xM, yM)]].
        """
        parser = make_parser()
        handler = BoxHandler()
        parser.setContentHandler(handler)
        boxes = []
        # NOTE(review): one handler instance is reused across files — this
        # assumes BoxHandler.boxes is reset on each parse; confirm in xml_reader.
        for xml_path in self.xml_paths:
            parser.parse(xml_path)
            box = handler.boxes  # [n_box, (xm, ym, xM, yM)]
            if self.yxyx:
                # swap to (ym, xm, yM, xM)
                boxes.append([[b[1], b[0], b[3], b[2]] for b in box])
            else:
                boxes.append(box)

        return boxes

    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: Tuple (image, annotations (target)).
        """
        img = self.imgs[index]
        img_size = self.img_sizes[index]
        img_id = self.img_ids[index]
        box = self.boxes[index]

        # cls is hard-coded to 1: presumably single-class detection — confirm
        # against the model's label mapping.
        ann = dict(img_id=img_id,
                   bbox=box,
                   cls=1,
                   img_size=img_size)

        if self.transform is not None:
            img, ann = self.transform(img, ann)

        return img, ann

    def __len__(self):
        return self.len


if __name__ == '__main__':
    # Smoke test: build the training set and wrap it in a data loader.
    from loader import create_loader

    data_root = r"F:\\FlyAI\\TBDetection_FlyAI\\data\\input\TBDetection"
    train_set = CocoDetection('train', data_root)

    loader_kwargs = dict(
        input_size=640,
        batch_size=4,
        use_prefetcher=True,
        interpolation='bilinear',
        fill_color='mean',
        num_workers=2,
        pin_mem=True,
    )
    create_loader(train_set, **loader_kwargs)