from torch.utils.data import Dataset
import pandas as pd
import os
from torchvision import transforms
import glob
from PIL import Image


class SunDataset(Dataset):
    """Solar-image dataset with per-image labels, split into train/val/test.

    Train/val rows come from a resampled CSV and are split 80/20 by row
    order; the test split is drawn from the full (non-resampled) CSV,
    keeping only rows whose image name starts with the requested year.
    """

    def __init__(self, root, size=128, split='train', year=2018, gray=True, delay=4):
        """
        Args:
            root(str): root path of dataset
            size(int): input size (w x h) of model, e.g. 128 -> (128, 128)
            split(str): 'train', 'val' or 'test', case insensitive
            year(int): 2012~2019, usable only when ``split='test'``;
                determines which year to evaluate the model on
            gray(bool): if True use gray-scale ('L') input, else 'RGB'
            delay(int): forecast horizon in days; selects which CSV to load
        """
        super(SunDataset, self).__init__()
        self.root = root
        self.split = split.upper()
        self.year = year
        self.delay = delay
        self.gray = 'L' if gray else 'RGB'  # PIL convert() mode

        # Per-channel normalization stats: single-channel estimates for
        # gray-scale input, standard ImageNet stats for RGB.
        if gray:
            mean, std = [0.45], [0.22]
        else:
            mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]

        # All three splits share the same deterministic preprocessing
        # pipeline, so build it once instead of once per branch.
        self.trans = transforms.Compose([
            transforms.Resize(size=(size, size)),
            transforms.ToTensor(),
            transforms.Normalize(mean=mean, std=std),
        ])

        # Format the filename itself (not the joined path) so a root
        # containing '{' or '}' cannot corrupt the path.
        df = pd.read_csv(
            os.path.join(root, 're_sampled_train_{}days.csv'.format(delay)),
            header=[0])
        train_size = len(df) // 10 * 8  # first 80% of rows -> train

        if self.split == 'TRAIN':
            self.img_names = list(df.iloc[:train_size, 0])
            self.labels = df.iloc[:train_size, 1].values
        elif self.split == 'VAL':
            self.img_names = list(df.iloc[train_size:, 0])
            self.labels = df.iloc[train_size:, 1].values
        else:
            # Test split: read the full (non-resampled) CSV and keep only
            # rows whose image name begins with the requested year.
            df = pd.read_csv(
                os.path.join(root, 'train_{}days.csv'.format(self.delay)),
                header=[0])
            df.dropna(inplace=True)
            self.img_names = []
            self.labels = []
            for index, name in enumerate(df.iloc[:, 0]):
                if name[:4] == str(self.year):
                    self.img_names.append(name)
                    self.labels.append(df.iloc[index, 1])

        self.len = len(self.img_names)
        print("load {} {} items".format(self.len, self.split))

    def __len__(self):
        """Return the number of samples in this split."""
        return self.len

    def __getitem__(self, idx):
        """Load one image, convert to the configured mode, and transform.

        Args:
            idx(int): sample index within this split.

        Returns:
            tuple: (transformed image tensor, label)
        """
        img_name = self.img_names[idx]
        path = os.path.join(self.root, 'train_imgs', img_name)
        label = self.labels[idx]

        img = Image.open(path).convert(self.gray)
        return self.trans(img), label


def data_collect(batch):
    """Placeholder collate function for a DataLoader.

    Args:
        batch: list of (image_tensor, label) tuples produced by the dataset.

    Returns:
        None — custom batching is not implemented yet.
    """
    return None


if __name__ == "__main__":
    # Smoke test: build all three splits against a local copy of the data.
    # NOTE(review): the dataset root is hard-coded to a Windows path —
    # adjust locally before running.
    train_data = SunDataset("D:/datasets/SunData", size=224, split='train',gray=False)
    val_data = SunDataset("D:/datasets/SunData", size=224, split='val',gray=False)
    test_data = SunDataset("D:/datasets/SunData", split='test', year=2012, size=224, gray=False)
