import torch
import numpy as np
from torch.utils.data import Dataset, DataLoader
import albumentations as A
from PIL import Image
from albumentations.pytorch.transforms import ToTensorV2
import matplotlib.pyplot as plt
import h5py
from torchvision.utils import make_grid
from PIL import Image
from PIL import ImageFilter
from scipy import ndimage
from scipy.ndimage import zoom
from torchvision import transforms
import random

class ACDC(Dataset):
    """ACDC cardiac MRI segmentation dataset backed by HDF5 files.

    The ``train`` split reads pre-extracted 2D slices listed in
    ``train_slices.list`` (under ``<root>/data/slices``); the ``val`` split
    reads whole cases listed in ``val.list`` (under ``<root>/data``).
    Each .h5 file provides an ``image`` and a ``label`` dataset.
    """

    # RGB colors indexed by class id 0..3 (0 = background).
    PALETTE = np.array([
        [0, 0, 0],
        [0, 0, 255],
        [0, 255, 0],
        [255, 0, 0],
    ])

    def __init__(self, root=r"E:\note\ssl\data\ACDC", split="train", transform=None):
        """
        :param root: dataset root directory holding the list files and ``data/``.
        :param split: "train" or "val"; selects which list file is read.
        :param transform: callable invoked as ``transform(image=..., mask=...)``
            returning a dict with "image" and "mask"; applied only when
            ``split == "train"``.
        """
        super(ACDC, self).__init__()
        self.split = split
        self.root = root
        self.transform = transform
        self.img_dir = []
        self.ann_dir = []
        self.sample_list = []
        self.load_annotations()  # load the .h5 file paths for this split

    def __len__(self):
        return len(self.sample_list)

    def __getitem__(self, idx):
        """Return (image, mask) for sample *idx*; augmented when training."""
        case = self.sample_list[idx]
        # Context manager guarantees the HDF5 handle is closed even if a
        # later step raises (the previous code leaked one handle per item).
        with h5py.File(case, "r") as h5f:
            image = np.array(h5f["image"][:], dtype=np.float32)
            mask = np.array(h5f["label"][:], dtype=np.uint8)

        if self.transform is not None and self.split == "train":
            result = self.transform(image=image, mask=mask)
            image = result["image"]
            mask = result["mask"]

        return image, mask

    def label_to_img(self, label):
        """Map integer class labels to an RGB visualization via PALETTE.

        Accepts a numpy array or torch tensor; a batched (N, H, W) input is
        tiled into a single grid image. 255 (ignore index) renders as
        background. Returns a uint8 RGB array.
        """
        if isinstance(label, torch.Tensor):
            # detach + cpu so CUDA / autograd tensors also convert cleanly
            label = label.detach().cpu().numpy()
        if not isinstance(label, np.ndarray):
            label = np.array(label)
        label = label.astype(np.uint8)
        label[label == 255] = 0
        img = self.PALETTE[label]
        if len(img.shape) == 4:
            img = torch.tensor(img).permute(0, 3, 1, 2)
            img = make_grid(tensor=img, nrow=2, scale_each=True)
            img = img.permute(1, 2, 0).numpy()

        return img.astype(np.uint8)

    def load_annotations(self):
        """Populate ``self.sample_list`` with .h5 paths for the active split."""
        if self.split == "train":
            with open(self.root + "/train_slices.list", "r") as f1:
                self.sample_list = f1.readlines()
            self.sample_list = [item.replace("\n", "")
                                for item in self.sample_list]
            self.sample_list = [self.root + "/data/slices/{}.h5".format(item)
                                for item in self.sample_list]

        elif self.split == "val":
            with open(self.root + "/val.list", "r") as f:
                self.sample_list = f.readlines()
            self.sample_list = [item.replace("\n", "")
                                for item in self.sample_list]
            self.sample_list = [self.root + "/data/{}.h5".format(item)
                                for item in self.sample_list]

        print("total {} samples".format(len(self.sample_list)))


def random_rot_flip(image, label):
    """Apply the same random 90-degree rotation and axis flip to both arrays."""
    quarter_turns = np.random.randint(0, 4)
    image = np.rot90(image, quarter_turns)
    label = np.rot90(label, quarter_turns)
    flip_axis = np.random.randint(0, 2)
    flipped = [np.flip(arr, axis=flip_axis).copy() for arr in (image, label)]
    return flipped[0], flipped[1]


def random_rotate(image, label):
    """Rotate both arrays by one random angle in [-20, 20) degrees.

    Nearest-neighbor interpolation (order=0) keeps label ids intact, and the
    output keeps the input shape (reshape=False).
    """
    angle = np.random.randint(-20, 20)

    def _rotate(arr):
        return ndimage.rotate(arr, angle, order=0, reshape=False)

    return _rotate(image), _rotate(label)

def color_jitter(image):
    """Apply a strong ColorJitter augmentation, converting numpy input first."""
    if not torch.is_tensor(image):
        image = transforms.ToTensor()(image)

    strength = 1.0  # overall color-distortion strength
    jitter = transforms.ColorJitter(
        0.8 * strength,  # brightness
        0.8 * strength,  # contrast
        0.8 * strength,  # saturation
        0.2 * strength,  # hue
    )
    return jitter(image)

class RandomGenerator(object):
    """Training augmentation: random rot/flip OR random rotation, then a
    nearest-neighbor resize to ``output_size``; returns torch tensors."""

    def __init__(self, output_size):
        # (height, width) every sample is resized to
        self.output_size = output_size

    def __call__(self, image, mask):
        # At most one geometric augmentation fires per call.
        if random.random() > 0.5:
            image, mask = random_rot_flip(image, mask)
        elif random.random() > 0.5:
            image, mask = random_rotate(image, mask)

        height, width = image.shape
        scale = (self.output_size[0] / height, self.output_size[1] / width)
        image = zoom(image, scale, order=0)
        mask = zoom(mask, scale, order=0)

        # Image gains a channel dimension: (1, H, W); mask stays (H, W).
        image_tensor = torch.from_numpy(image.astype(np.float32)).unsqueeze(0)
        mask_tensor = torch.from_numpy(mask.astype(np.uint8))
        return {'image': image_tensor, 'mask': mask_tensor}

def get_acdc_loader(root=r'/home/ubuntu/data/ACDC', batch_size=4, train_crop_size=(224, 224), num_workers=4):
    """Build the ACDC train/val DataLoaders.

    :param root: dataset root directory.
    :param batch_size: training batch size (validation always uses batch 1,
        since validation samples are whole volumes of varying depth).
    :param train_crop_size: (H, W) every training slice is resized to.
    :param num_workers: worker processes per DataLoader.
    :return: (train_dataloader, test_dataloader)
    """
    train_transform = RandomGenerator(train_crop_size)

    train_dataset = ACDC(root=root, split="train", transform=train_transform)
    test_dataset = ACDC(root=root, split="val")
    # drop_last keeps every training batch full, which simplifies BN stats.
    train_dataloader = DataLoader(train_dataset, batch_size=batch_size,
                                  num_workers=num_workers, shuffle=True, drop_last=True)
    test_dataloader = DataLoader(test_dataset, batch_size=1,
                                 num_workers=num_workers, shuffle=False)

    return train_dataloader, test_dataloader


def show(im):
    """Display a single-channel tensor as a grayscale image and save it
    to ``result.png``."""
    pixels = im.numpy().squeeze()
    figure = plt.figure()
    plt.imshow(pixels, cmap="gray")
    plt.show()
    figure.savefig("result.png")


def show_label(mask, path="label.jpg"):
    """Save a color label image to *path* and display it with matplotlib."""
    Image.fromarray(mask).save(path)
    plt.figure()
    plt.imshow(mask)
    plt.show()


if __name__ == '__main__':

    # Smoke test: pull one batch from each loader and visualize the first.
    train_dataloader, test_dataloader = get_acdc_loader()

    for batch_image, batch_label in train_dataloader:
        print(batch_image.shape)
        print(batch_label.shape)
        print(np.unique(batch_label.numpy()))
        show(batch_image[0])
        show_label(train_dataloader.dataset.label_to_img(batch_label))
        break

    for sample in test_dataloader:
        volume, segmentation = sample
        print(volume.shape)
        print(segmentation.shape)
        print(np.unique(segmentation.numpy()))
        break
