import os.path
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
import albumentations as A
from PIL import Image
from torchvision.transforms import transforms
from albumentations.pytorch.transforms import ToTensorV2
import matplotlib.pyplot as plt
from skimage import io
from torchvision.utils import make_grid


class VOC(Dataset):
    """PASCAL VOC 2012 semantic-segmentation dataset (with SBD-augmented labels).

    Expects the standard VOCdevkit layout under ``root``:
    ``JPEGImages/``, ``SegmentationClassAug/`` and split files under
    ``ImageSets/SegmentationAug`` (train) / ``ImageSets/Segmentation`` (val).
    """

    # 21 class names, index-aligned with the rows of PALETTE.
    CLASSES = ['background', 'aeroplane', 'bicycle', 'bird',
               'boat',
               'bottle', 'bus', 'car', 'cat', 'chair', 'cow',
               'diningtable',
               'dog', 'horse', 'motorbike', 'person',
               'pottedplant',
               'sheep', 'sofa', 'train', 'tv/monitor']

    # RGB color for each class index; used to colorize label maps.
    PALETTE = np.array([[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0],
                        [0, 0, 128],
                        [128, 0, 128], [0, 128, 128], [128, 128, 128], [64, 0, 0], [192, 0, 0], [64, 128, 0],
                        [192, 128, 0],
                        [64, 0, 128], [192, 0, 128], [64, 128, 128], [192, 128, 128],
                        [0, 64, 0],
                        [128, 64, 0], [0, 192, 0], [128, 192, 0], [0, 64, 128]])

    def __init__(self, root, split, transform):
        """
        Args:
            root: dataset root directory (the VOC2012 folder).
            split: 'train' uses the augmented train list; anything else uses val.txt.
            transform: albumentations transform called as transform(image=..., mask=...),
                or None to return raw numpy arrays.
        """
        self.root = root
        self.split = split
        self.transform = transform
        self.img_dir = []
        self.ann_dir = []
        self.load_annotations()  # populate sample_list / img_dir / ann_dir

    def __len__(self):
        return len(self.sample_list)

    def __getitem__(self, item):
        # BUG FIX: paths are stored in self.img_dir / self.ann_dir (set by
        # load_annotations); the old code read the non-existent attributes
        # self.img_path / self.mask_path, raising AttributeError on first access.
        img = np.array(Image.open(self.img_dir[item]).convert("RGB"))
        mask = np.array(Image.open(self.ann_dir[item]))

        if self.transform is not None:
            result = self.transform(image=img, mask=mask)
            img, mask = result["image"], result["mask"]

        return img, mask

    def load_annotations(self):
        """Read the split file and build the image/annotation path lists."""
        if self.split == 'train':
            split_file = os.path.join(self.root, "ImageSets", "SegmentationAug", "train_aug.txt")
        else:
            split_file = os.path.join(self.root, "ImageSets", "Segmentation", "val.txt")
        with open(split_file, "r") as f:
            # strip() also removes Windows '\r' that the old replace("\n", "") kept
            self.sample_list = [line.strip() for line in f]

        self.img_dir = [os.path.join(self.root, "JPEGImages", item + ".jpg") for item in self.sample_list]
        self.ann_dir = [os.path.join(self.root, "SegmentationClassAug", item + ".png") for item in self.sample_list]

        print("mode-{} load {} images".format(self.split, len(self.sample_list)))

    @classmethod
    def label_to_img(cls, label):
        """Colorize an integer label map with PALETTE.

        Args:
            label: HxW (or BxHxW batched) class-index map — Tensor, ndarray,
                or array-like. The ignore index 255 is rendered as background.

        Returns:
            HxWx3 RGB ndarray; for a batched input, a make_grid mosaic (HWC).
        """
        if isinstance(label, torch.Tensor):
            # detach().cpu() so CUDA / autograd tensors also convert cleanly
            label = label.detach().cpu().numpy()
        if not isinstance(label, np.ndarray):
            label = np.array(label)
        label = label.astype(np.int64)  # astype copies, so the caller's array is untouched

        label[label == 255] = 0  # map the ignore index to background
        img = cls.PALETTE[label]
        if img.ndim == 4:  # batched input -> tile into a single grid image
            img = torch.tensor(img).permute(0, 3, 1, 2)
            img = make_grid(tensor=img, nrow=2, scale_each=True)
            img = img.permute(1, 2, 0).numpy()

        return img


def get_voc_loader(root, batch_size=4,
                   train_crop_size=(512, 512), test_crop_size=(512, 512), crop_val=False):
    """Build train/validation DataLoaders for VOC segmentation.

    Args:
        root: VOC2012 dataset root directory.
        batch_size: training batch size (also used for validation when crop_val).
        train_crop_size: (H, W) of the random resized crop used in training.
        test_crop_size: (H, W) for resize + center-crop when crop_val is True.
        crop_val: if True, validation images are resized/cropped and batched;
            otherwise they keep their native size and are loaded one at a time.

    Returns:
        (train_dataloader, test_dataloader)
    """
    imagenet_mean = [0.485, 0.456, 0.406]
    imagenet_std = [0.229, 0.224, 0.225]

    train_transform = A.Compose([
        A.RandomResizedCrop(height=train_crop_size[0], width=train_crop_size[1]),  # random crop + rescale
        A.OneOf([
            A.ShiftScaleRotate(p=0.6),
            A.HorizontalFlip(p=0.8),
            A.VerticalFlip(p=0.8),
        ]),
        A.RandomBrightnessContrast(p=0.2),
        A.Normalize(mean=imagenet_mean, std=imagenet_std),
        ToTensorV2(),
    ])

    # Validation pipeline: optional geometric ops, then the shared tensor tail.
    test_ops = []
    if crop_val:
        test_ops.append(A.Resize(height=test_crop_size[0], width=test_crop_size[1]))
        test_ops.append(A.CenterCrop(height=test_crop_size[0], width=test_crop_size[1]))
    test_ops.append(A.Normalize(mean=imagenet_mean, std=imagenet_std))
    test_ops.append(ToTensorV2())
    test_transform = A.Compose(test_ops)

    train_dataset = VOC(root=root, split="train", transform=train_transform)
    test_dataset = VOC(root=root, split="test", transform=test_transform)

    train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True,
                                  num_workers=4, drop_last=True, pin_memory=True)

    # Variable-size validation images cannot be batched, so fall back to size 1.
    val_batch = batch_size if crop_val else 1
    test_dataloader = DataLoader(test_dataset, batch_size=val_batch, num_workers=4,
                                 shuffle=False, pin_memory=True)

    return train_dataloader, test_dataloader


def show(image):
    """Display a CHW image tensor with matplotlib and save it to image.jpg."""
    arr = image.numpy().transpose((1, 2, 0))  # CHW -> HWC for plotting
    plt.figure()
    plt.imshow(arr)
    plt.show()
    io.imsave("image.jpg", arr)


def show_label(label, path="label.jpg"):
    """Colorize a segmentation label map, display it, and save it to *path*."""
    colored = VOC.label_to_img(label)
    plt.figure()
    plt.imshow(colored)
    plt.show()
    Image.fromarray(np.uint8(colored)).save(path)


if __name__ == '__main__':
    # Smoke test: build the loaders and visually inspect one validation batch.
    train_dataloader, test_dataloader = get_voc_loader(
        root=r"E:\note\ssl\data\voc_aug_2\VOCdevkit\VOC2012")

    for image, label in test_dataloader:
        print(image.shape)
        print(label.shape)
        print(np.unique(label.numpy()))
        show(image[0])
        show_label(label[0].numpy())
        break
