import os
import cv2
import numpy as np
import pandas as pd
from PIL import Image
import torch
from sklearn.model_selection import train_test_split
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.dataloader import default_collate
from torchvision import transforms
from utils.rle_parse import make_mask
from utils.data_augmentation import data_augmentation


def augmentation(image, mask):
    """Apply data augmentation to an image/mask pair.

    Args:
        image: original image as a numpy array (as produced by cv2).
        mask: original mask as a numpy array.

    Returns:
        image_aug: augmented image wrapped as a PIL ``Image`` (ready for
            ``transforms.ToTensor`` in the datasets below).
        mask_aug: augmented mask, still a numpy array — callers convert it
            with ``torch.from_numpy``, so it must NOT be wrapped as PIL.
    """
    image_aug, mask_aug = data_augmentation(image, mask)
    # Only the image is converted to PIL; the mask deliberately stays numpy.
    image_aug = Image.fromarray(image_aug)

    return image_aug, mask_aug


def mask_only_collate_fun(batch):
    """Custom ``collate_fn`` that drops samples whose mask is empty.

    Args:
        batch: list of ``(image, mask)`` tensor pairs.

    Returns:
        The default-collated batch of the surviving pairs, or an empty
        tensor when every sample in the batch had an all-zero mask.
    """
    kept = [[img, msk] for img, msk in batch if torch.sum(msk) > 0]
    if kept:
        return default_collate(kept)
    # No sample survived: hand back an empty tensor as a sentinel.
    return torch.tensor(kept)


class DefaultClassDataset(Dataset):
    """Dataset for the defect-classification task.

    Each item is ``(image, label)`` where ``label`` is a per-class float
    vector marking whether the resized mask plane contains any positive
    pixel for that defect class.
    """

    def __init__(self, df, data_folder, phase, height, width, mean=(0.485, 0.456, 0.406),
                 std=(0.229, 0.224, 0.225), augmentation_flag=True):
        """
        Args:
            df: pivoted DataFrame indexed by ImageId whose columns hold the
                per-class RLE strings (consumed by ``make_mask``).
            data_folder: root folder containing the ``train_images`` directory.
            phase: ``'train'`` enables augmentation (with ``augmentation_flag``).
            height: target height for both image and mask.
            width: target width for both image and mask.
            mean: per-channel normalization means.
            std: per-channel normalization stds.
            augmentation_flag: whether augmentation may run in the train phase.
        """
        super().__init__()
        self.df = df
        self.root = data_folder
        self.phase = phase
        self.height = height
        self.width = width
        # Defaults are tuples: mutable (list) default arguments are shared
        # across calls and a classic Python pitfall.
        self.mean = mean
        self.std = std
        self.augmentation_flag = augmentation_flag
        self.files = df.index.tolist()
        self.transform_image = transforms.Compose([
            transforms.ToTensor(),
            transforms.Resize((height, width)),
            transforms.Normalize(self.mean, self.std)
        ])
        self.transform_mask = transforms.Resize((height, width))

    def __getitem__(self, index):
        image_id, mask = make_mask(index, self.df)
        image_path = os.path.join(self.root, "train_images", image_id)
        # cv2 loads BGR; convert to RGB before the torchvision transforms.
        img = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2RGB)
        if self.phase == 'train' and self.augmentation_flag:
            img, mask = augmentation(img, mask)
        img = self.transform_image(img)
        # (H, W, C) -> (C, H, W) so Resize operates on the channel-first mask.
        mask = self.transform_mask(torch.from_numpy(mask).permute(2, 0, 1))
        # Collapse each class plane to a single bit: 1 iff any pixel is set.
        mask = mask.view(mask.size(0), -1)
        mask = torch.sum(mask, dim=1)
        mask = mask > 0

        return img, mask.float()

    def __len__(self):
        return len(self.files)


class DefaultSegDataset(Dataset):
    """Dataset for the defect-segmentation task.

    Each item is ``(image, mask)`` with both resized to ``(height, width)``;
    the mask keeps one channel per defect class.
    """

    def __init__(self, df, data_folder, phase, height, width, mean=(0.485, 0.456, 0.406),
                 std=(0.229, 0.224, 0.225), augmentation_flag=True):
        """
        Args:
            df: pivoted DataFrame indexed by ImageId whose columns hold the
                per-class RLE strings (consumed by ``make_mask``).
            data_folder: root folder containing the ``train_images`` directory.
            phase: ``'train'`` enables augmentation (with ``augmentation_flag``).
            height: target height for both image and mask.
            width: target width for both image and mask.
            mean: per-channel normalization means.
            std: per-channel normalization stds.
            augmentation_flag: whether augmentation may run in the train phase.
        """
        super().__init__()
        self.df = df
        self.root = data_folder
        self.phase = phase
        self.height = height
        self.width = width
        # Defaults are tuples: mutable (list) default arguments are shared
        # across calls and a classic Python pitfall.
        self.mean = mean
        self.std = std
        self.augmentation_flag = augmentation_flag
        self.files = df.index.tolist()
        self.transform_image = transforms.Compose([
            transforms.ToTensor(),
            transforms.Resize((height, width)),
            transforms.Normalize(self.mean, self.std)
        ])
        self.transform_mask = transforms.Resize((height, width))

    def __getitem__(self, index):
        image_id, mask = make_mask(index, self.df)
        image_path = os.path.join(self.root, "train_images", image_id)
        # cv2 loads BGR; convert to RGB before the torchvision transforms.
        img = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2RGB)
        if self.phase == 'train' and self.augmentation_flag:
            img, mask = augmentation(img, mask)
        img = self.transform_image(img)
        # (H, W, C) -> (C, H, W) so Resize operates on the channel-first mask.
        mask = self.transform_mask(torch.from_numpy(mask).permute(2, 0, 1))
        return img, mask

    def __len__(self):
        return len(self.files)


class UnsupervisedDataset(Dataset):
    """Dataset over a flat list of unlabeled image paths.

    Returns only the transformed image; an all-zero dummy mask is passed to
    the augmentation pipeline so the shared ``augmentation`` helper can be
    reused, and its augmented mask is discarded.
    """

    def __init__(self, image_paths, phase, height, width, mean=(0.485, 0.456, 0.406),
                 std=(0.229, 0.224, 0.225), augmentation_flag=True):
        """
        Args:
            image_paths: list of absolute/relative paths to the images.
            phase: ``'train'`` enables augmentation (with ``augmentation_flag``).
            height: target image height.
            width: target image width.
            mean: per-channel normalization means.
            std: per-channel normalization stds.
            augmentation_flag: whether augmentation may run in the train phase.
        """
        super().__init__()
        self.image_paths = image_paths
        self.phase = phase
        self.height = height
        self.width = width
        # Defaults are tuples: mutable (list) default arguments are shared
        # across calls and a classic Python pitfall.
        self.mean = mean
        self.std = std
        self.augmentation_flag = augmentation_flag
        self.transform_image = transforms.Compose([
            transforms.ToTensor(),
            transforms.Resize((height, width)),
            transforms.Normalize(self.mean, self.std)
        ])

    def __getitem__(self, index):
        image_path = self.image_paths[index]
        # cv2 loads BGR; convert to RGB before the torchvision transforms.
        img = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2RGB)
        mask = np.zeros_like(img)
        if self.phase == 'train' and self.augmentation_flag:
            img, _ = augmentation(img, mask)
        img = self.transform_image(img)
        return img

    def __len__(self):
        # Was hard-coded to ``return 20`` (a debugging leftover that silently
        # truncated the dataset); report the real number of images.
        return len(self.image_paths)


# Registries mapping a configuration key to the Dataset class to instantiate;
# the provider functions below look these up via ``.get(type_key)``.
classify_data_map = {
    'default': DefaultClassDataset,
}

segment_data_map = {
    'default': DefaultSegDataset,
}

unsupervised_data_map = {
    'default': UnsupervisedDataset,
}


def classify_provider(
        classify_type,
        data_folder,
        batch_size,
        num_workers,
        height,
        width,
        mean,
        std,
        augmentation_flag
):
    """Build the train/val dataloaders for the classification task.

    Args:
        classify_type: key into ``classify_data_map`` selecting the Dataset class.
        data_folder: folder containing ``train.csv`` and ``train_images``.
        batch_size: samples per batch.
        num_workers: DataLoader worker processes.
        height: target image height.
        width: target image width.
        mean: normalization means forwarded to the dataset.
        std: normalization stds forwarded to the dataset.
        augmentation_flag: enable augmentation on the training split.

    Returns:
        (train_dataloader, val_dataloader)
    """
    frame = pd.read_csv(os.path.join(data_folder, 'train.csv'))
    # "ImageId_ClassId" holds both ids; split it, then pivot so each row is
    # one image with its per-class RLE strings.
    frame['ImageId'], frame['ClassId'] = zip(*frame['ImageId_ClassId'].str.split('_'))
    frame['ClassId'] = frame['ClassId'].astype(int)
    pivoted = frame.pivot(index='ImageId', columns='ClassId', values='EncodedPixels')
    # Number of defect classes present per image; used to stratify the split.
    pivoted['defects'] = pivoted.count(axis=1)
    train_df, val_df = train_test_split(
        pivoted, test_size=0.2, stratify=pivoted["defects"], random_state=69)
    dataset_cls = classify_data_map.get(classify_type)
    loaders = []
    for split_df, phase, shuffle in ((train_df, 'train', True), (val_df, 'val', False)):
        dataset = dataset_cls(split_df, data_folder, phase, height=height,
                              width=width, mean=mean, std=std,
                              augmentation_flag=augmentation_flag)
        loaders.append(DataLoader(
            dataset,
            batch_size=batch_size,
            num_workers=num_workers,
            pin_memory=True,
            shuffle=shuffle
        ))

    return loaders[0], loaders[1]


def provider(
        segment_type,
        data_folder,
        batch_size,
        num_workers,
        height,
        width,
        mean,
        std,
        augmentation_flag,
        mask_only=False,
):
    """Build the train/val dataloaders for the segmentation task.

    Args:
        segment_type: key into ``segment_data_map`` selecting the Dataset class.
        data_folder: folder containing ``train.csv`` and ``train_images``.
        batch_size: samples per batch.
        num_workers: DataLoader worker processes.
        height: target image/mask height.
        width: target image/mask width.
        mean: normalization means forwarded to the dataset.
        std: normalization stds forwarded to the dataset.
        augmentation_flag: enable augmentation on the training split.
        mask_only: when True, drop samples with empty masks via
            ``mask_only_collate_fun``.

    Returns:
        (train_dataloader, val_dataloader)
    """
    df = pd.read_csv(os.path.join(data_folder, 'train.csv'))
    # "ImageId_ClassId" holds both ids; split it, then pivot so each row is
    # one image with its per-class RLE strings.
    df['ImageId'], df['ClassId'] = zip(*df['ImageId_ClassId'].str.split('_'))
    df['ClassId'] = df['ClassId'].astype(int)
    df = df.pivot(index='ImageId', columns='ClassId', values='EncodedPixels')
    # Number of defect classes present per image; used to stratify the split.
    df['defects'] = df.count(axis=1)
    train_df, val_df = train_test_split(df, test_size=0.2, stratify=df["defects"],
                                        random_state=69)
    dataset_cls = segment_data_map.get(segment_type)
    train_dataset = dataset_cls(train_df, data_folder, 'train', height=height,
                                width=width, mean=mean, std=std,
                                augmentation_flag=augmentation_flag)
    val_dataset = dataset_cls(val_df, data_folder, 'val', height=height,
                              width=width, mean=mean, std=std,
                              augmentation_flag=augmentation_flag)
    # The two branches used to duplicate the whole DataLoader construction;
    # only collate_fn differs, so select it once (``default_collate`` is the
    # DataLoader default, so passing it explicitly is behavior-identical).
    collate_fn = mask_only_collate_fun if mask_only else default_collate
    train_dataloader = DataLoader(
        train_dataset,
        batch_size=batch_size,
        num_workers=num_workers,
        collate_fn=collate_fn,
        pin_memory=True,
        shuffle=True
    )
    val_dataloader = DataLoader(
        val_dataset,
        batch_size=batch_size,
        num_workers=num_workers,
        collate_fn=collate_fn,
        pin_memory=True,
        shuffle=False
    )

    return train_dataloader, val_dataloader


def unsupervised_provider(unsupervised_type,
                          data_folder,
                          batch_size,
                          num_workers,
                          height,
                          width,
                          mean,
                          std,
                          augmentation_flag):
    """Build train/val dataloaders over a folder of unlabeled images.

    Args:
        unsupervised_type: key into ``unsupervised_data_map`` selecting the
            Dataset class.
        data_folder: folder whose files are all treated as images.
        batch_size: samples per batch.
        num_workers: DataLoader worker processes.
        height: target image height.
        width: target image width.
        mean: normalization means forwarded to the dataset.
        std: normalization stds forwarded to the dataset.
        augmentation_flag: enable augmentation on the training split (the
            validation split never augments).

    Returns:
        (train_dataloader, val_dataloader)
    """
    paths = [os.path.join(data_folder, name) for name in os.listdir(data_folder)]
    train_paths, val_paths = train_test_split(paths, test_size=0.2, random_state=69)
    dataset_cls = unsupervised_data_map.get(unsupervised_type)
    train_dataset = dataset_cls(train_paths, phase='train', height=height,
                                width=width, mean=mean, std=std,
                                augmentation_flag=augmentation_flag)
    # Validation split: phase 'test' and augmentation disabled unconditionally.
    val_dataset = dataset_cls(val_paths, phase='test', height=height,
                              width=width, mean=mean, std=std,
                              augmentation_flag=False)
    loaders = [
        DataLoader(dataset,
                   batch_size=batch_size,
                   num_workers=num_workers,
                   pin_memory=True,
                   shuffle=shuffle)
        for dataset, shuffle in ((train_dataset, True), (val_dataset, False))
    ]
    return loaders[0], loaders[1]


if __name__ == '__main__':
    # Smoke test: build the unsupervised dataloaders over the local steel
    # dataset and print the shape of one batch from each split.
    train_dataloader, val_dataloader = unsupervised_provider(unsupervised_type="default",
                                                             data_folder='./Steel_data/train_images', batch_size=4,
                                                             num_workers=4, height=256, width=1600,
                                                             mean=[0.485, 0.456, 0.406],
                                                             std=[0.229, 0.224, 0.225],
                                                             augmentation_flag=True)
    for i in train_dataloader:
        print(i.shape)
        break
    for i in val_dataloader:
        print(i.shape)
        break