import random
import numpy as np
from PIL import Image
from PIL import ImageFilter
from scipy import ndimage
from scipy.ndimage import zoom
import matplotlib.pyplot as plt
import h5py
from torchvision.utils import make_grid
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.sampler import Sampler
from torchvision import transforms
import torch
import itertools
from copy import deepcopy
import os


class ACDC_UniMatch(Dataset):
    """ACDC slice dataset for UniMatch-style semi-supervised segmentation.

    Splits (see :meth:`load_annotations`):
        * ``train_l`` -- first ``label_num`` labeled training slices
        * ``train_u`` -- remaining (unlabeled) training slices
        * ``val``     -- volume list from ``val.list``
        * anything else -- volume list from ``test.list``
    """

    # Class-index -> RGB color lookup used by label_to_img (4 classes).
    PALETTE = np.array([
        [0, 0, 0],
        [0, 0, 255],
        [0, 255, 0],
        [255, 0, 0],
    ])

    def __init__(self, root=r"E:\note\ssl\data\ACDC", split="train_l", label_num=137, transform=None):
        """
        :param root: dataset root directory containing the ``*.list`` files and ``data/``
        :param split: "train_l", "train_u", "val", or anything else for the test list
        :param label_num: number of labeled slices (used only by the train splits)
        :param transform: callable invoked as ``transform(image=..., mask=...)``
            returning a dict with at least ``image`` and ``mask`` keys
        """
        super(ACDC_UniMatch, self).__init__()
        self.split = split
        self.root = root
        self.transform = transform
        self.sample_list = []
        self.label_num = label_num
        self.load_annotations()  # resolve .h5 file paths for this split
        print("total {} samples".format(len(self.sample_list)))

    def __len__(self):
        return len(self.sample_list)

    def __getitem__(self, idx):
        case = self.sample_list[idx]
        # Context manager so the HDF5 handle is always released — the original
        # left the file open, leaking handles across DataLoader workers.
        with h5py.File(case, "r") as h5f:
            image = np.array(h5f["image"][:], dtype=np.float32)
            mask = np.array(h5f["label"][:], dtype=np.uint8)

        if self.transform is not None and self.split in ("train_l", "train_u"):
            result = self.transform(image=image, mask=mask)
            image = result["image"]
            mask = result["mask"]

            if self.split == "train_u":
                # Unlabeled sample: weak view + two strong views + two CutMix
                # region masks (the ground-truth mask is intentionally unused).
                img_s1 = result["img_s1"]
                img_s2 = result["img_s2"]
                size = mask.shape[-1]
                cutmix_box1 = obtain_cutmix_box(size, p=0.5)
                cutmix_box2 = obtain_cutmix_box(size, p=0.5)
                return image, img_s1, img_s2, cutmix_box1, cutmix_box2

        return image, mask

    def label_to_img(self, label):
        """Map an integer label map (or a batch of them) to an RGB uint8 image.

        A batched (4-D after color lookup) result is tiled into a single
        2-column grid image via torchvision's ``make_grid``.
        """
        if isinstance(label, torch.Tensor):
            label = label.numpy()
        if not isinstance(label, np.ndarray):
            label = np.array(label)
        label = label.astype(np.uint8)
        label[label == 255] = 0  # treat the conventional "ignore" value as background
        img = self.PALETTE[label]
        if len(img.shape) == 4:
            # Batched input: arrange the colorized maps into one grid image.
            img = torch.tensor(img).permute(0, 3, 1, 2)
            img = make_grid(tensor=img, nrow=2, scale_each=True)
            img = img.permute(1, 2, 0).numpy()

        return img.astype(np.uint8)

    def load_annotations(self):
        """Populate ``self.sample_list`` with .h5 paths for the current split."""
        if self.split in ("train_l", "train_u"):
            with open(self.root + "/train_slices.list", "r") as f:
                # strip() (rather than replace("\n", "")) also removes any
                # trailing "\r" from Windows-edited lists; skip blank lines.
                names = [line.strip() for line in f if line.strip()]
            paths = np.array([self.root + "/data/slices/{}.h5".format(n) for n in names])
            if self.split == "train_l":
                self.sample_list = paths[:self.label_num]
            else:
                self.sample_list = paths[self.label_num:]
        else:
            # "val" reads val.list; every other split falls back to test.list.
            list_name = "/val.list" if self.split == "val" else "/test.list"
            with open(self.root + list_name, "r") as f:
                names = [line.strip() for line in f if line.strip()]
            self.sample_list = [self.root + "/data/{}.h5".format(n) for n in names]

        self.sample_list = np.array(self.sample_list)



def random_rot_flip(image, label):
    """Apply the same random 90-degree rotation and axis flip to both arrays.

    Draws a quarter-turn count in {0..3} and a flip axis in {0, 1}; the
    identical transform is applied to ``image`` and ``label`` so they stay
    aligned. Returns contiguous copies of both.
    """
    quarter_turns = np.random.randint(0, 4)
    image, label = (np.rot90(arr, quarter_turns) for arr in (image, label))
    flip_axis = np.random.randint(0, 2)
    image, label = (np.flip(arr, axis=flip_axis).copy() for arr in (image, label))
    return image, label


def random_rotate(image, label):
    """Rotate image and label by the same random angle in [-20, 20) degrees.

    Nearest-neighbour interpolation (order=0) keeps label values discrete,
    and reshape=False preserves the original array shape.
    """
    angle = np.random.randint(-20, 20)

    def _rot(arr):
        return ndimage.rotate(arr, angle, order=0, reshape=False)

    return _rot(image), _rot(label)


def blur(img, p=0.5):
    """With probability ``p``, Gaussian-blur a PIL image with sigma ~ U[0.1, 2.0).

    Returns the input unchanged otherwise.
    """
    if random.random() >= p:
        return img
    radius = np.random.uniform(0.1, 2.0)
    return img.filter(ImageFilter.GaussianBlur(radius=radius))

class RandomGenerator(object):
    """Weak augmentation for labeled slices.

    Randomly applies either a rot90+flip or a small rotation (or neither),
    resizes to ``output_size`` with nearest-neighbour interpolation, and
    converts the pair to tensors.
    """

    def __init__(self, output_size):
        # Target (height, width) after resizing.
        self.output_size = output_size

    def __call__(self, image, mask):
        # At most one of the two geometric augmentations fires; the two
        # random draws happen in the same order as the original code.
        if random.random() > 0.5:
            image, mask = random_rot_flip(image, mask)
        elif random.random() > 0.5:
            image, mask = random_rotate(image, mask)
        h, w = image.shape
        scale = (self.output_size[0] / h, self.output_size[1] / w)
        image = zoom(image, scale, order=0)
        mask = zoom(mask, scale, order=0)
        return {
            'image': torch.from_numpy(image.astype(np.float32)).unsqueeze(0),
            'mask': torch.from_numpy(mask.astype(np.uint8)),
        }

class StrongWeak(object):
    """Weak/strong augmentation pair for unlabeled slices (UniMatch style).

    Produces one weakly augmented view ('image') and two independently
    strongly augmented views ('img_s1', 'img_s2') of the same slice, plus
    the geometrically aligned mask.
    """

    def __init__(self, output_size):
        # Target (height, width) after resizing.
        self.output_size = output_size

    def __call__(self, image, mask):
        # --- weak geometric augmentation, shared by all three views ---
        if random.random() > 0.5:
            image, mask = random_rot_flip(image, mask)
        elif random.random() > 0.5:
            image, mask = random_rotate(image, mask)
        x, y = image.shape
        # Resize image and mask to output_size (nearest neighbour).
        image = zoom(image, (self.output_size[0] / x, self.output_size[1] / y), order=0)
        mask = zoom(mask, (self.output_size[0] / x, self.output_size[1] / y), order=0)

        # Convert to a PIL image so ColorJitter / GaussianBlur can run.
        # NOTE(review): assumes image values lie in [0, 1] — confirm upstream normalization.
        image = Image.fromarray((image * 255).astype(np.uint8))
        img_s1, img_s2 = deepcopy(image), deepcopy(image)
        # Weak view: back to a float tensor in [0, 1] with a channel dim.
        image = torch.from_numpy(np.array(image)).unsqueeze(0).float() / 255.0
        # --- strong view #1: color jitter (80% chance) + random blur ---
        if random.random() < 0.8:
            img_s1 = transforms.ColorJitter(0.5, 0.5, 0.5, 0.25)(img_s1)

        img_s1 = blur(img_s1, p=0.5)
        img_s1 = torch.from_numpy(np.array(img_s1)).unsqueeze(0).float() / 255.0

        # --- strong view #2: independently sampled color jitter + blur ---
        if random.random() < 0.8:
            img_s2 = transforms.ColorJitter(0.5, 0.5, 0.5, 0.25)(img_s2)
        img_s2 = blur(img_s2, p=0.5)
        img_s2 = torch.from_numpy(np.array(img_s2)).unsqueeze(0).float() / 255.0

        # Mask stays a numpy array matching the weak view's geometry.
        sample = {'image': image, 'img_s1': img_s1, 'img_s2': img_s2, 'mask': mask}

        return sample


def obtain_cutmix_box(img_size, p=0.5, size_min=0.02, size_max=0.4, ratio_1=0.3, ratio_2=1/0.3):
    """Return an (img_size, img_size) float tensor marking a CutMix rectangle with 1s.

    With probability 1-p an all-zero mask is returned. Otherwise the
    rectangle's area is uniform in [size_min, size_max] of the image area and
    its aspect ratio uniform in [ratio_1, ratio_2]; placement is rejection-
    sampled until the box fits entirely inside the image.
    """
    box = torch.zeros(img_size, img_size)
    if random.random() > p:
        return box

    area = np.random.uniform(size_min, size_max) * img_size * img_size
    while True:
        aspect = np.random.uniform(ratio_1, ratio_2)
        width = int(np.sqrt(area / aspect))
        height = int(np.sqrt(area * aspect))
        left = np.random.randint(0, img_size)
        top = np.random.randint(0, img_size)
        if left + width <= img_size and top + height <= img_size:
            box[top:top + height, left:left + width] = 1
            return box

def patients_to_slices(patiens_num):
    """Translate a labeled-patient fraction or count into a slice count.

    Fractions 0.05 / 0.1 / 0.2 map directly; otherwise the value is looked up
    (as a string) in a patient-count table. Unknown values raise KeyError.
    (Parameter name kept as-is for caller compatibility.)
    """
    fraction_table = {0.05: 68, 0.1: 136, 0.2: 256}
    for fraction, slices in fraction_table.items():
        if patiens_num == fraction:
            return slices

    count_table = {"3": 68, "7": 136, "14": 256, "21": 396,
                   "28": 512, "35": 664, "140": 824, "300": 1024}
    return count_table[str(patiens_num)]


def worker_init_fn(worker_id):
    """Give each DataLoader worker a distinct, reproducible Python RNG seed."""
    base_seed = 1337
    random.seed(base_seed + worker_id)


def get_ssl_acdc_unimatch_loader(root=r'/home/ubuntu/li/data/ACDC', batch_size=8, unlabel_batch_size=24, train_crop_size=(256, 256), label_num=0.1):
    """Build the four ACDC DataLoaders used for UniMatch training.

    :param root: dataset root directory
    :param batch_size: batch size for labeled data
    :param unlabel_batch_size: batch size for unlabeled data
    :param train_crop_size: (H, W) the training slices are resized to
    :param label_num: labeled amount as understood by ``patients_to_slices``
    :return: (label_loader, unlabel_loader1, unlabel_loader2, test_loader)
    """
    label_num = patients_to_slices(label_num)

    labeled_set = ACDC_UniMatch(root=root, split="train_l", label_num=label_num,
                                transform=RandomGenerator(train_crop_size))
    unlabeled_set = ACDC_UniMatch(root=root, split="train_u", label_num=label_num,
                                  transform=StrongWeak(train_crop_size))
    test_set = ACDC_UniMatch(root=root, split="test")

    def _train_loader(dataset, bs):
        return DataLoader(dataset, batch_size=bs, shuffle=True, num_workers=4, drop_last=True)

    label_loader = _train_loader(labeled_set, batch_size)
    # Two independently shuffled loaders over the same unlabeled set give
    # UniMatch its two unlabeled streams.
    unlabel_loader1 = _train_loader(unlabeled_set, unlabel_batch_size)
    unlabel_loader2 = _train_loader(unlabeled_set, unlabel_batch_size)

    test_loader = DataLoader(test_set, batch_size=1, num_workers=4, shuffle=False)
    return label_loader, unlabel_loader1, unlabel_loader2, test_loader


def show(im):
    """Display a single-channel tensor as a grayscale image and save it to result.png."""
    arr = im.numpy().squeeze()
    figure = plt.figure()
    plt.imshow(arr, cmap="gray")
    plt.show()
    figure.savefig("result.png")


def show_label(mask, path="label.jpg"):
    """Display a color label map with matplotlib and write it to ``path`` via PIL."""
    plt.figure()
    plt.imshow(mask)
    plt.show()
    Image.fromarray(mask).save(path)



if __name__ == '__main__':
    # Smoke test: build all four loaders and inspect one unlabeled batch.
    loaders = get_ssl_acdc_unimatch_loader()
    label_loader, unlabel_loader1, unlabel_loader2, test_loader = loaders

    for batch in unlabel_loader1:
        img_u_w, img_u_s1, img_u_s2, cutmix_box1, cutmix_box2 = batch
        print(img_u_w.shape)
        break