import json
import os
from collections import namedtuple

import torch
import torch.utils.data as data
from PIL import Image
import numpy as np
from matplotlib import pyplot as plt

from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
import albumentations as A
from albumentations.pytorch.transforms import ToTensorV2


class Cityscapes(Dataset):
    """Cityscapes semantic-segmentation dataset.

    Expects the standard Cityscapes layout under ``root``:
    images at ``leftImg8bit/<split>/<city>/*_leftImg8bit.png`` and
    targets at ``gtFine_trainvaltest/gtFine/<split>/<city>/*``.

    ``__getitem__`` returns an ``(image, target)`` pair after encoding raw
    label ids to train ids (0-18, with 255 as the ignore label) and applying
    albumentations augmentation.  NOTE(review): images/targets are returned as
    numpy arrays (albumentations output), not tensors — ToTensorV2 is not in
    the pipeline; confirm downstream collate expectations.
    """

    CityscapesClass = namedtuple('CityscapesClass', ['name', 'id', 'train_id', 'category', 'category_id',
                                                     'has_instances', 'ignore_in_eval', 'color'])
    # Official Cityscapes label definitions (id -> train_id / color mapping).
    classes = [
        CityscapesClass('unlabeled', 0, 255, 'void', 0, False, True, (0, 0, 0)),
        CityscapesClass('ego vehicle', 1, 255, 'void', 0, False, True, (0, 0, 0)),
        CityscapesClass('rectification border', 2, 255, 'void', 0, False, True, (0, 0, 0)),
        CityscapesClass('out of roi', 3, 255, 'void', 0, False, True, (0, 0, 0)),
        CityscapesClass('static', 4, 255, 'void', 0, False, True, (0, 0, 0)),
        CityscapesClass('dynamic', 5, 255, 'void', 0, False, True, (111, 74, 0)),
        CityscapesClass('ground', 6, 255, 'void', 0, False, True, (81, 0, 81)),
        CityscapesClass('road', 7, 0, 'flat', 1, False, False, (128, 64, 128)),
        CityscapesClass('sidewalk', 8, 1, 'flat', 1, False, False, (244, 35, 232)),
        CityscapesClass('parking', 9, 255, 'flat', 1, False, True, (250, 170, 160)),
        CityscapesClass('rail track', 10, 255, 'flat', 1, False, True, (230, 150, 140)),
        CityscapesClass('building', 11, 2, 'construction', 2, False, False, (70, 70, 70)),
        CityscapesClass('wall', 12, 3, 'construction', 2, False, False, (102, 102, 156)),
        CityscapesClass('fence', 13, 4, 'construction', 2, False, False, (190, 153, 153)),
        CityscapesClass('guard rail', 14, 255, 'construction', 2, False, True, (180, 165, 180)),
        CityscapesClass('bridge', 15, 255, 'construction', 2, False, True, (150, 100, 100)),
        CityscapesClass('tunnel', 16, 255, 'construction', 2, False, True, (150, 120, 90)),
        CityscapesClass('pole', 17, 5, 'object', 3, False, False, (153, 153, 153)),
        CityscapesClass('polegroup', 18, 255, 'object', 3, False, True, (153, 153, 153)),
        CityscapesClass('traffic light', 19, 6, 'object', 3, False, False, (250, 170, 30)),
        CityscapesClass('traffic sign', 20, 7, 'object', 3, False, False, (220, 220, 0)),
        CityscapesClass('vegetation', 21, 8, 'nature', 4, False, False, (107, 142, 35)),
        CityscapesClass('terrain', 22, 9, 'nature', 4, False, False, (152, 251, 152)),
        CityscapesClass('sky', 23, 10, 'sky', 5, False, False, (70, 130, 180)),
        CityscapesClass('person', 24, 11, 'human', 6, True, False, (220, 20, 60)),
        CityscapesClass('rider', 25, 12, 'human', 6, True, False, (255, 0, 0)),
        CityscapesClass('car', 26, 13, 'vehicle', 7, True, False, (0, 0, 142)),
        CityscapesClass('truck', 27, 14, 'vehicle', 7, True, False, (0, 0, 70)),
        CityscapesClass('bus', 28, 15, 'vehicle', 7, True, False, (0, 60, 100)),
        CityscapesClass('caravan', 29, 255, 'vehicle', 7, True, True, (0, 0, 90)),
        CityscapesClass('trailer', 30, 255, 'vehicle', 7, True, True, (0, 0, 110)),
        CityscapesClass('train', 31, 16, 'vehicle', 7, True, False, (0, 80, 100)),
        CityscapesClass('motorcycle', 32, 17, 'vehicle', 7, True, False, (0, 0, 230)),
        CityscapesClass('bicycle', 33, 18, 'vehicle', 7, True, False, (119, 11, 32)),
        CityscapesClass('license plate', -1, 255, 'vehicle', 7, False, True, (0, 0, 142)),
    ]

    # Colors for the 19 train ids, plus a trailing black entry used for the
    # ignore label (255 is remapped to index 19 in decode_target).
    train_id_to_color = [c.color for c in classes if (c.train_id != -1 and c.train_id != 255)]
    train_id_to_color.append([0, 0, 0])
    train_id_to_color = np.array(train_id_to_color)
    # Lookup table: raw label id -> train id (255 = ignore).  The 'license
    # plate' entry has id -1, which indexes the last row and also yields 255.
    id_to_train_id = np.array([c.train_id for c in classes])

    CLASSES = ('road', 'sidewalk', 'building', 'wall', 'fence', 'pole',
               'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky',
               'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle',
               'bicycle', "background")

    PALETTE = [[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156],
               [190, 153, 153], [153, 153, 153], [250, 170, 30], [220, 220, 0],
               [107, 142, 35], [152, 251, 152], [70, 130, 180], [220, 20, 60],
               [255, 0, 0], [0, 0, 142], [0, 0, 70], [0, 60, 100],
               [0, 80, 100], [0, 0, 230], [119, 11, 32], [0, 0, 0]]

    def __init__(self, root, mode="gtFine", split='train', target_type='semantic', crop_size=(512, 1024)):
        """Index all image/target file pairs and build the augmentations.

        Args:
            root: dataset root directory.
            mode: annotation mode used in target file names (e.g. "gtFine").
                NOTE(review): the targets directory is hard-coded to
                "gtFine_trainvaltest/gtFine" regardless of ``mode`` — confirm
                this is intended for "gtCoarse".
            split: one of 'train', 'val', 'test'.
            target_type: 'semantic', 'instance', 'color', 'polygon' or 'depth'.
            crop_size: (height, width) of the augmented crops.

        Raises:
            ValueError: if ``split`` is invalid.
            RuntimeError: if the expected directories are missing.
        """
        # Validate early, before split is used to build paths/transforms.
        if split not in ['train', 'test', 'val']:
            raise ValueError('Invalid split for mode! Please use split="train", split="test"'
                             ' or split="val"')

        self.root = os.path.expanduser(root)
        self.mode = mode
        self.target_type = target_type
        self.images_dir = os.path.join(self.root, 'leftImg8bit', split)
        self.targets_dir = os.path.join(self.root, "gtFine_trainvaltest", "gtFine", split)
        self.split = split
        self.images = []
        self.targets = []
        self.to_tensor = transforms.Compose([
            transforms.ToTensor(),
        ])
        if split == "train":
            self.transform = A.Compose([
                # Random scaled crop.
                A.RandomResizedCrop(height=crop_size[0], width=crop_size[1], scale=(0.75, 1.0), ratio=(1.0, 1.0)),
                # Random horizontal flip.
                A.HorizontalFlip(p=0.5),
                # Random brightness/contrast/saturation jitter.
                A.ColorJitter(0.1, 0.1, 0.1),
                # A.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
            ])
        else:
            self.transform = A.Compose([
                A.RandomCrop(height=crop_size[0], width=crop_size[1]),
                # A.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])
            ])

        if not os.path.isdir(self.images_dir) or not os.path.isdir(self.targets_dir):
            raise RuntimeError('Dataset not found or incomplete. Please make sure all required folders for the'
                               ' specified "split" and "mode" are inside the "root" directory')

        # Cityscapes nests files one directory per city; pair each image with
        # its target by swapping the '_leftImg8bit' suffix for the target one.
        for city in os.listdir(self.images_dir):
            img_dir = os.path.join(self.images_dir, city)
            target_dir = os.path.join(self.targets_dir, city)
            for file_name in os.listdir(img_dir):
                self.images.append(os.path.join(img_dir, file_name))
                target_name = '{}_{}'.format(file_name.split('_leftImg8bit')[0],
                                             self._get_target_suffix(self.mode, self.target_type))
                self.targets.append(os.path.join(target_dir, target_name))

    @classmethod
    def encode_target(cls, target):
        """Map raw Cityscapes label ids to train ids (0-18; 255 = ignore)."""
        return cls.id_to_train_id[np.array(target)]

    @classmethod
    def decode_target(cls, target):
        """Map a train-id label map to an RGB color image.

        The ignore label (255) is rendered black.  Works on a copy so the
        caller's array is not mutated (the original modified it in place).
        """
        target = np.array(target)      # np.array copies; keeps caller's data intact
        target[target == 255] = 19     # ignore label -> trailing black palette entry
        return cls.train_id_to_color[target]

    def __getitem__(self, index):
        """Load, encode and augment the (image, target) pair at ``index``."""
        img = Image.open(self.images[index]).convert('RGB')
        target = Image.open(self.targets[index])
        # Remap raw label ids to train ids 0-18 (255 = ignore).
        target = self.encode_target(target)
        if self.transform is not None:
            # Apply the same spatial augmentation to image and mask.
            result = self.transform(image=np.array(img), mask=np.array(target))
            img, target = result["image"], result["mask"]

        return img, target

    def __len__(self):
        return len(self.images)

    def _load_json(self, path):
        """Load and return the JSON content at ``path`` (polygon targets)."""
        with open(path, 'r') as file:
            data = json.load(file)
        return data

    def _get_target_suffix(self, mode, target_type):
        """Return the target file-name suffix for ``mode``/``target_type``.

        Raises:
            ValueError: for an unknown ``target_type`` (the original silently
            returned None, which produced a bogus file name later).
        """
        if target_type == 'instance':
            return '{}_instanceIds.png'.format(mode)
        elif target_type == 'semantic':
            return '{}_labelIds.png'.format(mode)
        elif target_type == 'color':
            return '{}_color.png'.format(mode)
        elif target_type == 'polygon':
            return '{}_polygons.json'.format(mode)
        elif target_type == 'depth':
            return '{}_disparity.png'.format(mode)
        raise ValueError('Unknown target_type: {}'.format(target_type))


def get_cityscapes_loader(path=r"/root/cityscapes", mode=True, batch_size=2, crop_size=(512, 1024)):
    """Build a DataLoader over the Cityscapes dataset.

    Args:
        path: dataset root directory.
        mode: True -> training split ('train'); False -> validation split ('val').
        batch_size: samples per batch.
        crop_size: (height, width) crop passed to the dataset transforms.

    Returns:
        A ``DataLoader`` yielding ``(image, target)`` batches.

    Note:
        The original built an elaborate ``train_transform`` pipeline here and
        never used it (augmentation lives inside ``Cityscapes.__init__``); that
        dead code has been removed.  The validation loader previously used
        ``shuffle=True, drop_last=True``, which randomized evaluation order and
        silently dropped samples — evaluation now keeps every sample in order.
    """
    if mode:
        train_dataset = Cityscapes(root=path, split="train", crop_size=crop_size)
        # Shuffle and drop the ragged last batch only while training.
        return DataLoader(train_dataset, batch_size=batch_size, shuffle=True, drop_last=True)
    test_dataset = Cityscapes(root=path, split="val", crop_size=crop_size)
    # Deterministic order, no dropped samples for evaluation.
    return DataLoader(test_dataset, batch_size=batch_size, shuffle=False, drop_last=False)


def show_image(image):
    """Display a CHW image tensor with matplotlib (converted to HWC)."""
    hwc = image.numpy().transpose(1, 2, 0)  # CHW -> HWC for imshow
    plt.figure()
    print(hwc.shape)
    plt.imshow(hwc)
    plt.show()


def show_label_2(target, title):
    """Visualize a train-id label map using the Cityscapes palette.

    Args:
        target: numpy array of train ids (0-18, 255 = ignore).
        title: title for the matplotlib figure.

    Note:
        BUG FIX — the original referenced a module-level ``train_id_to_color``
        which does not exist (it is a ``Cityscapes`` class attribute), so any
        call raised NameError.  Also now works on a copy instead of mutating
        the caller's array.
    """
    plt.figure()
    print(np.unique(target))
    target = np.array(target)      # copy: don't clobber the caller's labels
    target[target == 255] = 19     # ignore label -> trailing black palette entry
    colored = Cityscapes.train_id_to_color[target]
    plt.imshow(colored)
    plt.title(title)
    plt.show()


if __name__ == '__main__':
    # Smoke test: build train/val loaders from a hard-coded local dataset path,
    # print their batch counts, then print the unique train ids present in the
    # first training label map (expected: values in 0-18 plus 255 for ignore).
    train_dataloader = get_cityscapes_loader(path=r"E:\note\cv\data\cityscapes", mode=True, batch_size=2)
    test_dataloader = get_cityscapes_loader(path=r"E:\note\cv\data\cityscapes", mode=False, batch_size=2)
    print(len(train_dataloader))
    print(len(test_dataloader))
    for images, labels in train_dataloader:
        print(np.unique(labels[0].numpy()))
        break
    #     show_image(images[0])
    #     show_label_2(labels[0].numpy(), "hello")
    #     break
    # for images, labels in test_dataloader:
    #     show_image(images[0])
    #     print(images[0])
    #     show_label_2(labels[0].numpy(),"hello")
    #     break
