from PIL import Image
from torch.utils.data import Dataset, DataLoader
import torchvision.transforms as transforms
import os
import numpy as np
import imgaug.augmenters as iaa
from imgaug.augmentables.segmaps import SegmentationMapsOnImage
import torch.nn.functional as F

class ImgAug(object):
    """imgaug wrapper for (image, segmentation-mask) numpy pairs.

    A center-padding pipeline (pad to 1:1 aspect ratio) is constructed once,
    but the line that would apply it is currently disabled, so ``__call__``
    only round-trips the mask through ``SegmentationMapsOnImage`` and returns
    the pair unchanged.
    """

    def __init__(self):
        # NOTE(review): to_deterministic() here freezes the sampled augmentation
        # parameters for the lifetime of this object — confirm that is intended
        # before re-enabling the application step below.
        self.augmentations = iaa.Sequential([
            iaa.PadToAspectRatio(1.0, position="center-center").to_deterministic(),
        ])

    def __call__(self, data):
        image, mask = data
        seg_map = SegmentationMapsOnImage(mask, shape=image.shape)
        # Augmentation application intentionally left disabled:
        # image, seg_map = self.augmentations(
        #     image=image, segmentation_maps=seg_map)
        return image, seg_map.get_arr()

class ToTensor(object):
    """Convert a (numpy image, numpy label) pair to torch tensors.

    The label tensor is multiplied by 255 to undo torchvision ToTensor's
    scaling so the integer class ids are preserved.
    """

    def __init__(self):
        pass

    def __call__(self, data):
        image, label = data
        to_tensor = transforms.ToTensor()
        return to_tensor(image), to_tensor(label) * 255


class Resize(object):
    """Nearest-neighbour resize for a (image, label) tensor pair.

    Nearest interpolation keeps label values discrete (no blending of class
    ids).

    Args:
        size: target spatial size — an int or an (H, W) tuple, forwarded
            directly to ``F.interpolate``.
    """

    def __init__(self, size=416):
        self.size = size

    def __call__(self, data):
        # interpolate wants a batch dim: add it, resize, then drop it again.
        resized = [
            F.interpolate(t.unsqueeze(0), size=self.size, mode="nearest").squeeze(0)
            for t in data
        ]
        return resized[0], resized[1]

# Default preprocessing pipeline applied to every (image, label) pair:
#   1. ImgAug   - round-trips the label through SegmentationMapsOnImage
#                 (the actual augmentation call is currently commented out).
#   2. ToTensor - converts both arrays to torch tensors; the label is
#                 multiplied by 255 so it keeps integer class ids
#                 (presumably undoing torchvision's [0, 1] scaling — confirm).
#   3. Resize   - nearest-neighbour resize; note the (180, 240) tuple here
#                 overrides Resize's default size of 416.
DEFAULT_TRANSFORMS = transforms.Compose([
    ImgAug(),
    ToTensor(),
    Resize((180,240)),
])

class My_dataset(Dataset):
    """CamVid-style segmentation dataset.

    Reads ``<datasets_type>.txt`` inside *datasets_path*; each non-blank line
    holds an image path and a label path separated by whitespace (both
    relative to *datasets_path*).

    Args:
        datasets_path: root directory of the dataset.
        datasets_type: split name, e.g. "train" or "test".
    """

    def __init__(self, datasets_path="../SegNet-Tutorial/CamVid", datasets_type="train"):
        super().__init__()
        self.datasets_path = datasets_path
        self.datasets_type = datasets_type
        self.txt_file = os.path.join(datasets_path, datasets_type + ".txt")
        self.img_list = []
        self.label_list = []
        # Fix: the original opened the file in "r+" and never closed it,
        # leaking the handle for the dataset's lifetime. Use a context manager
        # and read-only mode. split() (no argument) handles the trailing
        # newline and any whitespace separator uniformly, and blank lines are
        # skipped instead of crashing the unpacking.
        with open(self.txt_file, "r") as f:
            for line in f:
                parts = line.split()
                if not parts:
                    continue
                self.img_list.append(parts[0])
                self.label_list.append(parts[1])

    def __getitem__(self, index):
        """Load, transform and return the (image, label) pair at *index*."""
        img_path = self.img_list[index]
        label_path = self.label_list[index]
        # Image as RGB, label as single-channel ('L') class-id map.
        img = np.array(Image.open(os.path.join(self.datasets_path, img_path)).convert('RGB'))
        label = np.array(Image.open(os.path.join(self.datasets_path, label_path)).convert('L'))
        img, label = DEFAULT_TRANSFORMS((img, label))
        return img, label

    def __len__(self):
        return len(self.img_list)
 
if __name__ == "__main__":
    # Smoke test: build both splits and print the shapes of one training batch.
    camvid_root = "../SegNet-Tutorial/CamVid"
    train_set = My_dataset(camvid_root, datasets_type="train")
    test_set = My_dataset(camvid_root, datasets_type="test")
    train_loader = DataLoader(train_set, batch_size=5, shuffle=True, num_workers=4)
    test_loader = DataLoader(test_set, batch_size=5, shuffle=False, num_workers=4)
    for step, (images, labels) in enumerate(train_loader):
        print(step)
        print(images.shape)
        print(labels.shape)
        break
