import os.path

import cv2
import numpy as np
from torch.utils.data.dataset import Dataset
from torch.utils.data.dataloader import DataLoader
import torch
from imgaug.augmentables.segmaps import SegmentationMapsOnImage  # conda install imgaug
import imgaug.augmenters as iaa


class SegmentationDataset(Dataset):
    def __init__(self, where='train', seq=None):
        self.where = where
        self.seq = seq

        self.img_path = os.path.join("./dataset", where, "img_preprocessed")
        self.mask_path = os.path.join("./dataset", where, "GT_preprocessed")

        self.img_list = os.listdir(path=self.img_path)
        self.mask_list = os.listdir(path=self.mask_path)

        self.img_list.sort()
        self.mask_list.sort()

    def __len__(self):
        return len(self.img_list)

    def __getitem__(self, index):
        img_file = self.img_list[index]
        mask_file = self.mask_list[index]

        img = np.load(os.path.join(self.img_path, img_file))
        mask = np.load(os.path.join(self.mask_path, mask_file))

        img = cv2.resize(img, (512, 512))
        mask = cv2.resize(mask, (512, 512))

        if self.seq:
            segmap = SegmentationMapsOnImage(mask, shape=mask.shape)
            img, mask = self.seq(image=img, segmentation_maps=segmap)
            mask = mask.get_arr()

        # 图像的三维 分别是 高 宽 通道，转变为网络中常用的 通道 高 宽，
        # 而标签是一个灰度图，他只有高，宽，所以给最前面添加一个通道数=1
        return (
            torch.Tensor(img.transpose(2, 0, 1).copy()),
            torch.Tensor(np.expand_dims(mask, axis=0).copy()),
            img_file
        )


if __name__ == '__main__':
    # Data-augmentation pipeline: random affine jitter plus flips and
    # occasional right-angle rotations.
    augmenter = iaa.Sequential([
        iaa.Affine(
            scale={"x": (0.8, 1.2), "y": (0.8, 1.2)},
            translate_percent=({"x": (-0.1, 0.1), "y": (-0.1, 0.1)}),
            rotate=(0, 360)
        ),
        iaa.Fliplr(0.5),
        iaa.Flipud(0.5),
        iaa.Sometimes(0.3, iaa.Affine(rotate=90)),
        iaa.Sometimes(0.3, iaa.Affine(rotate=180)),
        iaa.Sometimes(0.3, iaa.Affine(rotate=270))
    ])

    dataset = SegmentationDataset(where='train', seq=augmenter)
    loader = DataLoader(dataset, batch_size=1, shuffle=True, num_workers=1)

    # Smoke test: pull a single batch and show its shapes.
    for batch_idx, (image, label, filename) in enumerate(loader):
        print(image.shape)
        print(label.shape)
        print(filename)
        break
