import os

import albumentations as A
import cv2
import pandas as pd
import torch
from albumentations.pytorch import ToTensorV2
from pandas import DataFrame
from torch.utils.data import Dataset, DataLoader

from training.datasets.transform import IsotropicResize


def create_generalization_transform():
    """Build the stochastic augmentation pipeline used to improve generalization.

    Each step fires with its own probability: blur, exactly one of three
    color jitters, grayscale conversion, shift/scale/rotate, and optical
    distortion. Intended to run before normalization/tensor conversion.

    Returns:
        A.Compose: the composed albumentations pipeline.
    """
    return A.Compose([
        A.Blur(blur_limit=(5, 10), p=0.7),
        # Exactly one of the three color augmentations is picked per call.
        A.OneOf([A.RandomBrightnessContrast(), A.FancyPCA(), A.HueSaturationValue()], p=0.7),
        A.ToGray(p=0.2),
        # Constant (zero) border so pixels revealed by the geometric
        # transforms stay black instead of being reflected/replicated.
        A.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.2, rotate_limit=15, border_mode=cv2.BORDER_CONSTANT, p=0.5),
        A.OpticalDistortion(distort_limit=(1., 2.), border_mode=cv2.BORDER_CONSTANT, p=0.5)
    ])


class S2Dataset(Dataset):
    """Segmentation dataset yielding paired image/mask crops.

    Expects ``data_root`` to contain ``train_crops/<img_name>/<img_file>``
    and ``train_mask_crops/<img_name>/<img_file>`` with matching filenames,
    as indexed by the two columns of ``df``.
    """

    def __init__(self, data_root, df: DataFrame, mode, transform: A.Compose):
        self.train_crops_path = os.path.join(data_root, 'train_crops')
        self.train_mask_crops_path = os.path.join(data_root, 'train_mask_crops')
        # Each row of df is an (img_name, img_file) pair — see __getitem__.
        self.df = df
        self.mode = mode
        self.transform = transform
        # NOTE(review): built but never applied in __getitem__ — kept for
        # backward compatibility; wire it in or drop it once confirmed unused.
        self.generalization_transform = create_generalization_transform()

    def __getitem__(self, index):
        """Return ``{'images': ..., 'masks': ...}`` for the row at ``index``.

        Raises:
            FileNotFoundError: if the image or mask crop cannot be read.
        """
        img_name, img_file = self.df.iloc[index].values
        # image — cv2.imread silently returns None on a missing/corrupt file,
        # which would otherwise surface as a cryptic cvtColor error.
        img_path = os.path.join(self.train_crops_path, str(img_name), img_file)
        image = cv2.imread(img_path, cv2.IMREAD_COLOR)
        if image is None:
            raise FileNotFoundError(f'Failed to read image: {img_path}')
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        # mask — same silent-failure guard as for the image.
        mask_path = os.path.join(self.train_mask_crops_path, str(img_name), img_file)
        mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)
        if mask is None:
            raise FileNotFoundError(f'Failed to read mask: {mask_path}')

        # Transform image and mask together so spatial augmentations stay aligned.
        transformed = self.transform(image=image, mask=mask)
        image = transformed["image"]
        mask = transformed["mask"]
        # Scale mask values from [0, 255] down to [0, 1].
        mask = mask / 255.

        return {'images': image, 'masks': mask}

    def __len__(self):
        """Number of samples (rows in the backing DataFrame)."""
        return self.df.shape[0]


def create_transform():
    """Deterministic preprocessing: ImageNet-stats normalization, then
    conversion to a CHW torch tensor.

    Returns:
        A.Compose: the composed albumentations pipeline.
    """
    steps = [
        A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
        ToTensorV2(),
    ]
    return A.Compose(steps)


def get_s2_dataloader(model, args, pin_memory=True):
    """Build the train sampler and the train/validation data loaders.

    ``model`` is accepted for call-site compatibility but not used here.

    Returns:
        tuple: ``(train_sampler, train_loader, val_loader)``; the sampler is
        a ``DistributedSampler`` when ``args.distributed`` is set, else None.
    """
    # --- training split ---
    df_train = pd.read_csv('data/data_train.csv')
    train_dataset = S2Dataset(data_root=args.data_dir, df=df_train,
                              mode='train', transform=create_transform())
    sampler = (torch.utils.data.distributed.DistributedSampler(train_dataset)
               if args.distributed else None)
    # Shuffle only when no sampler owns the ordering.
    loader_train = DataLoader(train_dataset, batch_size=args.batch_size,
                              shuffle=sampler is None, sampler=sampler,
                              num_workers=args.workers, pin_memory=pin_memory,
                              drop_last=True)

    # --- validation split ---
    df_val = pd.read_csv('data/data_val.csv')
    val_dataset = S2Dataset(data_root=args.data_dir, df=df_val,
                            mode='validation', transform=create_transform())
    loader_val = DataLoader(val_dataset, batch_size=args.batch_size,
                            shuffle=False, num_workers=args.workers,
                            pin_memory=pin_memory, drop_last=False)

    return sampler, loader_train, loader_val
