import os

import albumentations as A
import cv2
import pandas as pd
from albumentations.pytorch import ToTensorV2
from pandas import DataFrame
from torch.utils.data import DataLoader
from torch.utils.data import Dataset


class OuluNpuDataset(Dataset):
    """OULU-NPU anti-spoofing dataset yielding paired RGB and MSR crops.

    Each row of ``df`` is expected to hold three values:
    (video directory name, image filename, integer label).
    For every sample the matching RGB frame and its MSR (Multi-Scale
    Retinex) counterpart are loaded from parallel directory trees under
    ``data_root`` and passed through one albumentations ``Compose`` so
    both receive identical spatial augmentation (MSR travels as ``mask``).
    """

    # Split name -> (RGB sub-directory, MSR sub-directory) under data_root.
    # Any mode other than 'train'/'validation' falls back to the test split,
    # matching the original if/elif/else behavior.
    _SPLIT_DIRS = {
        'train': ('Train_files', 'Train_files_MSR'),
        'validation': ('Dev_files', 'Dev_files_MSR'),
    }

    def __init__(self, data_root, df: DataFrame, mode, transform: A.Compose):
        self.data_root = data_root
        self.df = df
        self.mode = mode
        self.transform = transform

    def __getitem__(self, index):
        video, img_file, label = self.df.iloc[index].values
        rgb_dir, msr_dir = self._SPLIT_DIRS.get(self.mode, ('Test_files', 'Test_files_MSR'))
        rgb_path = os.path.join(self.data_root, rgb_dir, video, img_file)
        msr_path = os.path.join(self.data_root, msr_dir, video, img_file)

        image = cv2.imread(rgb_path, cv2.IMREAD_COLOR)
        if image is None:
            # cv2.imread returns None on failure; fail loudly here instead of
            # letting cvtColor raise an opaque assertion later.
            raise FileNotFoundError(f'Could not read RGB image: {rgb_path}')
        rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        msr = cv2.imread(msr_path, cv2.IMREAD_COLOR)
        if msr is None:
            raise FileNotFoundError(f'Could not read MSR image: {msr_path}')
        # cv2.imread yields BGR, so BGR2GRAY is the correct luminance
        # conversion. The previous COLOR_RGB2GRAY swapped the R/B weights
        # (harmless only if the stored MSR channels are identical).
        msr = cv2.cvtColor(msr, cv2.COLOR_BGR2GRAY)

        transformed = self.transform(image=rgb, mask=msr)
        rgb = transformed["image"]
        # Dividing by 1. promotes the integer mask tensor to float; then
        # replicate it to 3 channels so it matches the RGB tensor layout.
        msr = transformed["mask"] / 1.
        msr = msr.unsqueeze(0).repeat(3, 1, 1)

        return {'image': rgb, 'msr': msr, 'label': label}

    def __len__(self):
        # One sample per metadata row.
        return len(self.df)


def create_train_transform(resize=128, crop=114):
    """Build the training augmentation pipeline.

    Resize to ``resize`` x ``resize``, take a random ``crop`` x ``crop``
    crop, apply flip/shift-scale-rotate augmentation, normalize with
    ImageNet statistics and convert to a CHW tensor. The defaults
    preserve the original fixed 128 -> 114 geometry.

    Args:
        resize: side length of the intermediate resize (default 128).
        crop: side length of the random crop fed to the model (default 114).

    Returns:
        An ``A.Compose`` pipeline accepting ``image`` (and ``mask``) arrays.
    """
    return A.Compose([
        A.HorizontalFlip(),
        A.Resize(height=resize, width=resize),
        A.RandomCrop(height=crop, width=crop),
        # BORDER_CONSTANT fills rotated-out regions with a constant (black)
        # border rather than reflecting image content.
        A.ShiftScaleRotate(rotate_limit=(-20, 20), border_mode=cv2.BORDER_CONSTANT, p=0.5),
        # ImageNet mean/std.
        A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
        ToTensorV2(),
    ])


def create_val_test_transform(size=114):
    """Build the deterministic validation/test pipeline.

    Resize to ``size`` x ``size`` (default 114, matching the training crop),
    normalize with ImageNet statistics and convert to a CHW tensor. No
    random augmentation is applied.

    Args:
        size: side length of the evaluation input (default 114).

    Returns:
        An ``A.Compose`` pipeline accepting ``image`` (and ``mask``) arrays.
    """
    return A.Compose([
        A.Resize(height=size, width=size),
        # ImageNet mean/std — must match the training normalization.
        A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
        ToTensorV2(),
    ])


def get_oulu_npu_dataloader(model, args):
    """Create the OULU-NPU train and validation DataLoaders.

    Args:
        model: currently unused; kept for interface compatibility (its
            ``default_cfg`` input size was previously consulted here).
        args: namespace providing ``data_dir``, ``batch_size`` and ``workers``.

    Returns:
        Tuple ``(train_loader, val_loader)``.
    """
    train_df = pd.read_csv('data/data_train.csv')
    train_data = OuluNpuDataset(data_root=args.data_dir, df=train_df, mode='train',
                                transform=create_train_transform())
    # drop_last=True keeps every training batch the same size (helps BatchNorm).
    train_loader = DataLoader(train_data, batch_size=args.batch_size, shuffle=True,
                              num_workers=args.workers, pin_memory=True, drop_last=True)

    val_df = pd.read_csv('data/data_val.csv')
    val_data = OuluNpuDataset(data_root=args.data_dir, df=val_df, mode='validation',
                              transform=create_val_test_transform())
    # Validation keeps every sample: no shuffling, no dropped batches.
    val_loader = DataLoader(val_data, batch_size=args.batch_size, shuffle=False,
                            num_workers=args.workers, pin_memory=True, drop_last=False)
    return train_loader, val_loader


def get_oulu_npu_test_dataloader(model, args):
    """Create the OULU-NPU test DataLoader.

    Args:
        model: currently unused; kept for interface compatibility (its
            ``default_cfg`` input size was previously consulted here).
        args: namespace providing ``data_dir``, ``batch_size`` and ``workers``.

    Returns:
        The test ``DataLoader`` (unshuffled, keeping every sample).
    """
    test_df = pd.read_csv('data/data_test.csv')
    test_data = OuluNpuDataset(data_root=args.data_dir, df=test_df, mode='test',
                               transform=create_val_test_transform())
    test_loader = DataLoader(test_data, batch_size=args.batch_size, shuffle=False,
                             num_workers=args.workers, pin_memory=True, drop_last=False)
    return test_loader
