import torch
from torch.utils.data import Dataset
import json
import os
from PIL import Image
from depthaware.utils.utils import transform
import numpy as np
import torchvision.transforms.functional as FT

def make_dataset_fromlst(listfilename):
    """
    Parse an NYU list file into parallel lists of file paths.

    Each non-empty line holds three whitespace-separated fields:
        imagepath depthpath labelpath
    (NOTE: the previously documented 4-field "imagepath seglabelpath
    depthpath HHApath" format did not match what the code parses.)

    :param listfilename: path to the list file
    :return: dict with keys 'images', 'lables', 'depths' (parallel lists of
             paths) and 'boxs' (always an empty list, kept for interface
             compatibility with existing callers)
    """
    images = []
    lables = []
    depths = []

    with open(listfilename) as f:
        for line in f:
            line = line.strip()
            if not line:
                continue  # tolerate blank lines (e.g. trailing newline)
            imgname, depthname, lablesname = line.split()
            images.append(imgname)
            depths.append(depthname)
            lables.append(lablesname)

    return {'images': images, 'boxs': [], 'lables': lables, 'depths': depths}

class nyuSSDDataset(Dataset):
    """
    A PyTorch Dataset for NYU SSD training/testing, to be used with a
    DataLoader (pass :meth:`collate_fn` as its ``collate_fn``).

    Reads image / depth / label file paths from the list file named by
    ``opt.list`` and yields (image, boxes, class labels, depth, difficulties)
    tuples, transformed by ``transform``.
    """

    def __init__(self, opt, split):
        """
        :param opt: options object; ``opt.list`` is the path to the NYU
                    list file (see :func:`make_dataset_fromlst`)
        :param split: one of 'TRAIN' or 'TEST' (case-insensitive)
        """
        self.split = split.upper()
        assert self.split in {'TRAIN', 'TEST'}

        self.opt = opt

        # Parallel lists of image / label / depth file paths.
        self.paths_dict = make_dataset_fromlst(opt.list)
        self.len = len(self.paths_dict['images'])

    def __getitem__(self, i):
        """
        Load and transform one sample.

        :param i: index into the dataset
        :return: (image, boxes, clas, depth, difficults) where boxes is an
                 (n_objects, 4) float tensor, clas an (n_objects,) long
                 tensor, and difficults an all-zero tensor (this dataset has
                 no difficulty annotations).
        """
        # Read the image and force 3-channel RGB.
        image = Image.open(self.paths_dict['images'][i], mode='r')
        image = image.convert('RGB')

        # Each label-file line is: class_id x_min y_min x_max y_max
        boxes = []
        clas = []
        with open(self.paths_dict['lables'][i], 'r') as f:
            for line in f:
                values = [float(v) for v in line.split()]
                if not values:
                    continue  # tolerate blank lines
                clas.append(values[0])
                boxes.append(values[1:])

        boxes = torch.FloatTensor(boxes)  # (n_objects, 4)
        clas = torch.LongTensor(clas)  # (n_objects,)

        depth = Image.open(self.paths_dict['depths'][i])

        # No difficulty annotations available — mark every object as easy (0).
        difficults = torch.zeros(clas.size())

        # Apply transformations (resize / augmentation / normalization).
        image, boxes, clas, depth = transform(image, boxes, clas, depth, split=self.split)

        return image, boxes, clas, depth, difficults

    def __len__(self):
        return self.len

    def collate_fn(self, batch):
        """
        Collate samples that may contain different numbers of objects.

        Images and depths stack into batch tensors; boxes, class labels and
        difficulties remain lists of per-image tensors of varying size.

        Note: this need not be a method — it reads no instance state.

        :param batch: an iterable of N tuples from __getitem__()
        :return: (images, boxes, clas, depths, difficults) — two stacked
                 tensors and three lists of N tensors each
        """
        images = []
        boxes = []
        clas = []
        depths = []
        difficults = []

        for image, box, cls_, depth, difficult in batch:
            images.append(image)
            boxes.append(box)
            clas.append(cls_)
            depths.append(depth)
            difficults.append(difficult)

        images = torch.stack(images, dim=0)  # (N, 3, H, W)
        depths = torch.stack(depths, dim=0)

        return images, boxes, clas, depths, difficults
