import torch.nn as nn
from torchvision.transforms.functional import normalize
import glob
import os

import numpy
from torchvision import transforms
import torch
from PIL import Image
import numpy as np
import pandas as pd
import random
import numbers
import torchvision
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader, Dataset
import albumentations as A
from torchvision.utils import make_grid
from albumentations.pytorch.transforms import ToTensorV2


def get_label_info():
    """Return the CamVid colour legend.

    Maps each class name to ``[R, G, B, flag]`` where ``flag == 1`` marks
    one of the 11 trainable classes.  Insertion order is significant:
    ``CamVid.img_to_label`` iterates this dict to assign class indices.
    """
    # (name, R, G, B, trainable-flag), in the canonical CamVid order.
    legend = [
        ('Animal', 64, 128, 64, 0),
        ('Archway', 192, 0, 128, 0),
        ('Bicyclist', 0, 128, 192, 1),
        ('Bridge', 0, 128, 64, 0),
        ('Building', 128, 0, 0, 1),
        ('Car', 64, 0, 128, 1),
        ('CartLuggagePram', 64, 0, 192, 0),
        ('Child', 192, 128, 64, 0),
        ('Column_Pole', 192, 192, 128, 1),
        ('Fence', 64, 64, 128, 1),
        ('LaneMkgsDriv', 128, 0, 192, 0),
        ('LaneMkgsNonDriv', 192, 0, 64, 0),
        ('Misc_Text', 128, 128, 64, 0),
        ('MotorcycleScooter', 192, 0, 192, 0),
        ('OtherMoving', 128, 64, 64, 0),
        ('ParkingBlock', 64, 192, 128, 0),
        ('Pedestrian', 64, 64, 0, 1),
        ('Road', 128, 64, 128, 1),
        ('RoadShoulder', 128, 128, 192, 0),
        ('Sidewalk', 0, 0, 192, 1),
        ('SignSymbol', 192, 128, 128, 1),
        ('Sky', 128, 128, 128, 1),
        ('SUVPickupTruck', 64, 128, 192, 0),
        ('TrafficCone', 0, 0, 64, 0),
        ('TrafficLight', 0, 64, 64, 0),
        ('Train', 192, 64, 128, 0),
        ('Tree', 128, 128, 0, 1),
        ('Truck_Bus', 192, 128, 192, 0),
        ('Tunnel', 64, 0, 64, 0),
        ('VegetationMisc', 192, 192, 0, 0),
        ('Void', 0, 0, 0, 0),
        ('Wall', 64, 192, 0, 0),
    ]
    return {name: [r, g, b, flag] for name, r, g, b, flag in legend}


class CamVid(Dataset):
    """CamVid semantic-segmentation dataset (11-class protocol).

    The dataset is small, so heavy data augmentation is expected to be
    supplied via ``transform``.  Label files are RGB colour maps; they are
    converted to integer class maps where the 11 trainable classes get
    indices 0..10 and everything else becomes the ignore index 255.
    """

    # Names of the 11 trainable classes plus the background entry
    # (fixed typo: was 'backgroud').
    CLASSES = ['Bicyclist', 'Building', 'Car', 'Column_Pole',
               'Fence', 'Pedestrian', 'Road', 'Sidewalk',
               'SignSymbol', 'Sky', 'Tree', 'background']

    # RGB colour for each CLASSES entry; index 11 (background) is black.
    PALETTE = np.array([[0, 128, 192], [128, 0, 0], [64, 0, 128], [192, 192, 128],
                        [64, 64, 128], [64, 64, 0], [128, 64, 128], [0, 0, 192],
                        [192, 128, 128], [128, 128, 128], [128, 128, 0], [0, 0, 0]])

    def __init__(self, root, split="train", transform=None):
        """
        Args:
            root: dataset root containing ``train``/``val``/``test`` image
                folders plus the matching ``*_labels`` folders.
            split: "train" uses the train + val images; any other value
                uses the test images.
            transform: optional albumentations-style callable taking
                ``image=`` / ``mask=`` keyword arguments.
        """
        super(CamVid, self).__init__()
        self.root = root
        self.split = split
        self.transform = transform
        self.img_dir = []  # sorted image file paths
        self.ann_dir = []  # sorted label file paths, parallel to img_dir
        self.load_annotations()  # populate the two path lists

    def __getitem__(self, index):
        """Return the (image, label) pair at ``index``."""
        img = np.array(Image.open(self.img_dir[index]))
        label = np.array(Image.open(self.ann_dir[index]))
        label = self.img_to_label(label)  # RGB colours -> class indices

        if self.transform is not None:
            result = self.transform(image=img, mask=label)
            img, label = result["image"], result["mask"]

        return img, label

    def __len__(self):
        return len(self.img_dir)

    def load_annotations(self):
        """Collect and sort image/label file paths for the chosen split."""
        image_path = []
        label_path = []
        if self.split == "train":
            # Common CamVid practice: train on the train + val splits.
            image_path.append(os.path.join(self.root, "train"))
            label_path.append(os.path.join(self.root, "train_labels"))
            image_path.append(os.path.join(self.root, "val"))
            label_path.append(os.path.join(self.root, "val_labels"))
        else:
            image_path.append(os.path.join(self.root, "test"))
            label_path.append(os.path.join(self.root, "test_labels"))

        for image_path_ in image_path:
            self.img_dir.extend(glob.glob(os.path.join(image_path_, '*.png')))
        self.img_dir.sort()
        for label_path_ in label_path:
            self.ann_dir.extend(glob.glob(os.path.join(label_path_, '*.png')))
        self.ann_dir.sort()
        self.label_info = get_label_info()
        # Sorting both lists pairs images with their labels by filename order.
        assert len(self.img_dir) == len(
            self.ann_dir), "image length is not equal to label length"
        print("mode:{}\t load:{} images".format(self.split, len(self.img_dir)))

    def img_to_label(self, img):
        """Convert an RGB label image (H, W, 3) to an integer class map.

        Colours flagged 1 in ``label_info`` receive consecutive indices
        0..10 (dict insertion order matches CLASSES); every other listed
        colour maps to the ignore index 255.

        Bug fix: the map now defaults to 255, so a pixel whose colour is
        not listed at all stays background instead of silently becoming
        class 0 (the original initialised with zeros).
        """
        semantic_map = np.full(img.shape[:-1], 255, dtype=np.float64)
        class_index = 0
        for name in self.label_info:
            color = self.label_info[name][:3]
            is_trainable = self.label_info[name][3]
            # Pixels whose RGB triple matches this legend entry exactly.
            class_map = np.all(np.equal(img, color), axis=-1)
            if is_trainable == 1:
                semantic_map[class_map] = class_index
                class_index += 1
            else:
                semantic_map[class_map] = 255  # non-trainable -> ignore index
        return semantic_map

    def label_to_img(self, label):
        """Map an integer class map back to an RGB image via PALETTE.

        Accepts torch tensors, numpy arrays, or anything ``np.array``
        accepts.  A batched (N, H, W) input is tiled into one grid image.
        """
        if isinstance(label, torch.Tensor):
            # .detach().cpu() so CUDA / autograd tensors convert cleanly
            # (the original plain .numpy() raises on CUDA tensors).
            label = label.detach().cpu().numpy()
        if not isinstance(label, np.ndarray):
            label = np.array(label)
        label = label.astype(np.int64)  # copies, so the caller's array is untouched
        label[label == 255] = 11  # ignore index -> background palette entry
        img = self.PALETTE[label]
        if len(img.shape) == 4:  # batched input: tile into a single grid image
            img = torch.tensor(img).permute(0, 3, 1, 2)
            img = make_grid(tensor=img, nrow=2, scale_each=True)
            img = np.uint8(img.permute(1, 2, 0).numpy())

        return img


def get_camvid_loader(root=r"/root/autodl-tmp/CamVid", batch_size=2,
                      train_crop_size=(720, 960), test_crop_size=(720, 960)):
    """Create the CamVid train/test DataLoader pair.

    Args:
        root: dataset root directory.
        batch_size: batch size for both loaders.
        train_crop_size: (height, width) of the random training crop.
        test_crop_size: (height, width) of the deterministic test crop.

    Returns:
        (train_loader, test_loader)
    """
    imagenet_mean = [0.485, 0.456, 0.406]
    imagenet_std = [0.229, 0.224, 0.225]

    # Training: random resized crop + horizontal flip for augmentation.
    train_pipeline = A.Compose([
        A.RandomResizedCrop(height=train_crop_size[0],
                            width=train_crop_size[1],
                            scale=(0.5, 2.0)),
        A.HorizontalFlip(p=0.5),
        A.Normalize(mean=imagenet_mean, std=imagenet_std),
        ToTensorV2(),
    ])
    # Testing: deterministic centre crop, no augmentation.
    test_pipeline = A.Compose([
        A.CenterCrop(height=test_crop_size[0], width=test_crop_size[1]),
        A.Normalize(mean=imagenet_mean, std=imagenet_std),
        ToTensorV2(),
    ])

    train_loader = DataLoader(
        CamVid(root=root, split="train", transform=train_pipeline),
        batch_size=batch_size, shuffle=True, num_workers=4,
        drop_last=True, pin_memory=True)

    test_loader = DataLoader(
        CamVid(root=root, split="test", transform=test_pipeline),
        batch_size=batch_size, shuffle=False, drop_last=False,
        num_workers=4, pin_memory=True)

    return train_loader, test_loader


class Denormalize(object):
    """Invert a torchvision-style ``Normalize(mean, std)``.

    Re-normalizing with mean' = -mean/std and std' = 1/std gives
    (x - mean') / std' = x * std + mean, i.e. the inverse transform.
    """

    def __init__(self, mean, std):
        mean = np.array(mean)
        std = np.array(std)
        # Parameters of the equivalent forward normalization.
        self._mean = -mean / std
        self._std = 1 / std

    def __call__(self, tensor):
        if isinstance(tensor, np.ndarray):
            # Broadcast over (C, H, W) arrays channel-wise.
            mean = self._mean.reshape(-1, 1, 1)
            std = self._std.reshape(-1, 1, 1)
            return (tensor - mean) / std
        return normalize(tensor, self._mean, self._std)


def show_image(image):
    """Undo ImageNet normalization on a CHW tensor and display it."""
    denorm = Denormalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225])
    chw = denorm(image.numpy()) * 255
    hwc = chw.transpose(1, 2, 0).astype(np.uint8)  # CHW -> HWC for imshow
    plt.figure()
    plt.imshow(hwc)
    plt.show()


def show_label(mask, title):
    """Display a label map titled ``title`` and save it as '<title>.png'."""
    figure = plt.figure()
    plt.imshow(mask)
    plt.title(title)
    plt.show()
    figure.savefig(f'{title}.png')


if __name__ == '__main__':
    from torchvision.utils import save_image

    # Build the loaders and report how many batches each provides.
    train_loader, test_loader = get_camvid_loader(
        root=r"/home/ubuntu/data/CamVid", batch_size=4)
    print(len(train_loader))
    print(len(test_loader))

    # Visual sanity check on the first training batch.
    images, labels = next(iter(train_loader))
    print(images.shape)
    print(np.unique(labels))
    show_image(images[0])
    show_label(train_loader.dataset.label_to_img(labels), "camvid_train")

    # Visual sanity check on the first test batch.
    images, labels = next(iter(test_loader))
    print(images.shape)
    print(np.unique(labels))
    # save_image(images, "test_loader.png")
    show_label(test_loader.dataset.label_to_img(labels), "camvid_test")
