import cv2
from PIL import Image
from albumentations.pytorch.transforms import ToTensorV2
from albumentations import(
    OneOf, Resize, Normalize, Compose, Transpose,
    HorizontalFlip, VerticalFlip, Flip, RandomCrop,
    CenterCrop, ShiftScaleRotate, Rotate,
    RandomBrightnessContrast,
    RandomGamma, CLAHE, Sharpen, Emboss, FancyPCA, Sharpen,
    GaussianBlur, GaussNoise, Blur, MotionBlur, MedianBlur, 
    HueSaturationValue, OpticalDistortion, GridDistortion, RandomRotate90, 
    Perspective, RandomSizedCrop, ChannelShuffle, RGBShift
)
from torchvision import transforms


def randomcrop_transform_plus(img_size=(256, 256), crop_size=(224, 224), mean=(0, 0, 0), std=(1, 1, 1), rotation=180, border_mode=0):
    '''
    Augmentation pipeline: Resize, VerticalFlip, HorizontalFlip, Rotate,
    RandomCrop, Normalize, and conversion to a torch tensor.

    Args:
        img_size: (height, width) to resize to before cropping; an int is
            treated as a square size.
        crop_size: (height, width) of the random crop; an int is treated as
            a square size.
        mean: per-channel normalization mean.
        std: per-channel normalization std.
        rotation: rotation limit in degrees passed to Rotate.
        border_mode: OpenCV border mode used when rotating
            (0 == cv2.BORDER_CONSTANT).

    Returns:
        An albumentations Compose pipeline.
    '''
    if isinstance(img_size, int):
        img_size = (img_size, img_size)
    if isinstance(crop_size, int):
        crop_size = (crop_size, crop_size)
    return Compose([
        # albumentations expects OpenCV interpolation flags; the previous
        # Image.BILINEAR (PIL enum == 2) was interpreted as cv2.INTER_CUBIC.
        Resize(img_size[0], img_size[1], interpolation=cv2.INTER_LINEAR),
        VerticalFlip(p=0.5),
        HorizontalFlip(p=0.5),
        Rotate(rotation, p=1.0, border_mode=border_mode),
        RandomCrop(crop_size[0], crop_size[1]),
        Normalize(mean=mean, std=std, p=1),
        ToTensorV2(),
    ])

def randomcrop_transform(img_size=(256, 256), crop_size=(224, 224), mean=(0, 0, 0), std=(1, 1, 1), border_mode=0):
    '''
    Pipeline: Resize, RandomCrop, Normalize, and conversion to a torch tensor.

    Args:
        img_size: (height, width) to resize to before cropping; an int is
            treated as a square size.
        crop_size: (height, width) of the random crop; an int is treated as
            a square size.
        mean: per-channel normalization mean.
        std: per-channel normalization std.
        border_mode: unused; kept for call-site compatibility.

    Returns:
        An albumentations Compose pipeline.
    '''
    if isinstance(img_size, int):
        img_size = (img_size, img_size)
    if isinstance(crop_size, int):
        crop_size = (crop_size, crop_size)
    return Compose([
        # albumentations expects OpenCV interpolation flags; the previous
        # Image.BILINEAR (PIL enum == 2) was interpreted as cv2.INTER_CUBIC.
        Resize(img_size[0], img_size[1], interpolation=cv2.INTER_LINEAR),
        RandomCrop(crop_size[0], crop_size[1]),
        Normalize(mean=mean, std=std, p=1),
        ToTensorV2(),
    ])

def centercrop_tranform(img_size=(256, 256), crop_size=(224, 224), mean=(0, 0, 0), std=(1, 1, 1)):
    '''
    Basic deterministic pipeline: Resize, CenterCrop, Normalize, and
    conversion to a torch tensor.

    NOTE(review): the name is misspelled ("tranform") but is kept as-is to
    avoid breaking existing callers.

    Args:
        img_size: (height, width) to resize to before cropping; an int is
            treated as a square size.
        crop_size: (height, width) of the center crop; an int is treated as
            a square size.
        mean: per-channel normalization mean.
        std: per-channel normalization std.

    Returns:
        An albumentations Compose pipeline.
    '''
    if isinstance(img_size, int):
        img_size = (img_size, img_size)
    if isinstance(crop_size, int):
        crop_size = (crop_size, crop_size)
    return Compose([
        # albumentations expects OpenCV interpolation flags; the previous
        # Image.BILINEAR (PIL enum == 2) was interpreted as cv2.INTER_CUBIC.
        Resize(img_size[0], img_size[1], interpolation=cv2.INTER_LINEAR),
        CenterCrop(crop_size[0], crop_size[1]),
        Normalize(mean=mean, std=std, p=1),
        ToTensorV2(),
    ])

def randomflip_transform(img_size=(256, 256), mean=(0, 0, 0), std=(1, 1, 1)):
    '''
    Pipeline: Resize, VerticalFlip, HorizontalFlip, Normalize, and
    conversion to a torch tensor.

    Args:
        img_size: (height, width) to resize to; an int is treated as a
            square size.
        mean: per-channel normalization mean.
        std: per-channel normalization std.

    Returns:
        An albumentations Compose pipeline.
    '''
    if isinstance(img_size, int):
        img_size = (img_size, img_size)
    return Compose([
        # albumentations expects OpenCV interpolation flags; the previous
        # Image.BILINEAR (PIL enum == 2) was interpreted as cv2.INTER_CUBIC.
        Resize(img_size[0], img_size[1], interpolation=cv2.INTER_LINEAR),
        VerticalFlip(p=0.5),
        HorizontalFlip(p=0.5),
        Normalize(mean=mean, std=std, p=1),
        ToTensorV2(),
    ])

def resize_transform_basic(img_size=(256, 256), mean=(0, 0, 0), std=(1, 1, 1)):
    '''
    Minimal pipeline: Resize, Normalize, and conversion to a torch tensor.

    Args:
        img_size: (height, width) to resize to; an int is treated as a
            square size.
        mean: per-channel normalization mean.
        std: per-channel normalization std.

    Returns:
        An albumentations Compose pipeline.
    '''
    if isinstance(img_size, int):
        img_size = (img_size, img_size)
    return Compose([
        # albumentations expects OpenCV interpolation flags; the previous
        # Image.BILINEAR (PIL enum == 2) was interpreted as cv2.INTER_CUBIC.
        Resize(img_size[0], img_size[1], interpolation=cv2.INTER_LINEAR),
        Normalize(mean=mean, std=std, p=1),
        ToTensorV2(),
    ])


def normalize_only(img_size=(224, 224), mean=(0, 0, 0), std=(1, 1, 1)):
    '''
    Minimal pipeline: Resize, Normalize, and conversion to a torch tensor.

    Args:
        img_size: (height, width) to resize to; an int is treated as a
            square size (now handled, consistent with the other factories).
        mean: per-channel normalization mean.
        std: per-channel normalization std.

    Returns:
        An albumentations Compose pipeline.
    '''
    # Every sibling factory accepts a bare int; this one previously crashed
    # on img_size[1] for ints — normalize to a tuple for consistency.
    if isinstance(img_size, int):
        img_size = (img_size, img_size)
    return Compose([
        # albumentations expects OpenCV interpolation flags; the previous
        # Image.BILINEAR (PIL enum == 2) was interpreted as cv2.INTER_CUBIC.
        Resize(img_size[0], img_size[1], interpolation=cv2.INTER_LINEAR),
        Normalize(mean=mean, std=std, p=1),
        ToTensorV2(),
    ])


def multi_transforms_heavy(img_size=(1024, 512), mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), padding=32):
    '''
    Heavy augmentation pipeline: flips, ShiftScaleRotate, one of
    blur/contrast/sharpen, one of FancyPCA/HueSaturationValue, then
    resize-with-margin, Normalize, RandomCrop back to img_size, and
    conversion to a torch tensor.

    Args:
        img_size: (height, width) of the final crop.
        mean: per-channel normalization mean.
        std: per-channel normalization std.
        padding: margin (pixels) added to each dimension before the random
            crop (new parameter; default 32 preserves old behavior).

    Returns:
        An albumentations Compose pipeline.
    '''
    return Compose([
        VerticalFlip(p=0.5),
        HorizontalFlip(p=0.5),
        ShiftScaleRotate(rotate_limit=10, border_mode=0, p=0.5),

        OneOf([
            GaussianBlur(blur_limit=3, p=0.4),
            RandomBrightnessContrast(brightness_limit=0.1, contrast_limit=0.1, p=0.4),
            Sharpen(alpha=(0.2, 0.3), p=0.3),
        ], p=1),

        OneOf([
            FancyPCA(p=0.3),
            HueSaturationValue(hue_shift_limit=0.1, sat_shift_limit=0.1, val_shift_limit=10, p=0.3),
        ], p=1),

        # albumentations expects OpenCV interpolation flags; the previous
        # Image.BILINEAR (PIL enum == 2) was interpreted as cv2.INTER_CUBIC.
        Resize(img_size[0] + padding, img_size[1] + padding, interpolation=cv2.INTER_LINEAR),
        # Normalize before the crop is order-equivalent (both are per-pixel).
        Normalize(mean=mean, std=std, p=1),
        RandomCrop(img_size[0], img_size[1]),
        ToTensorV2(),
    ])

def multi_transforms_medium(img_size=(1024, 512), mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), padding=32):
    '''
    Medium augmentation pipeline: flips, one of blur/contrast/sharpen, then
    resize-with-margin, Normalize, RandomCrop back to img_size, and
    conversion to a torch tensor.

    Args:
        img_size: (height, width) of the final crop.
        mean: per-channel normalization mean.
        std: per-channel normalization std.
        padding: margin (pixels) added to each dimension before the random
            crop.

    Returns:
        An albumentations Compose pipeline.
    '''
    return Compose([
        VerticalFlip(p=0.5),
        HorizontalFlip(p=0.5),

        OneOf([
            GaussianBlur(blur_limit=3, p=0.4),
            RandomBrightnessContrast(brightness_limit=0.1, contrast_limit=0.1, p=0.4),
            Sharpen(alpha=(0.2, 0.3), p=0.3),
        ], p=1),

        # albumentations expects OpenCV interpolation flags; the previous
        # Image.BILINEAR (PIL enum == 2) was interpreted as cv2.INTER_CUBIC.
        Resize(img_size[0] + padding, img_size[1] + padding, interpolation=cv2.INTER_LINEAR),
        Normalize(mean=mean, std=std, p=1),
        RandomCrop(img_size[0], img_size[1]),
        ToTensorV2(),
    ])

def multi_transforms_medium_2(img_size=(1024, 512), mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), padding=32):
    '''
    Medium augmentation pipeline #2: resize, random 90-degree rotations,
    flips, transpose, small rotation, one of blur/sharpen, RandomSizedCrop,
    strong brightness/contrast jitter, Normalize, and conversion to a
    torch tensor.

    Args:
        img_size: (height, width) of the output.
        mean: per-channel normalization mean.
        std: per-channel normalization std.
        padding: unused; kept for signature compatibility with the other
            medium pipelines.

    Returns:
        An albumentations Compose pipeline.
    '''
    height, width = img_size[0], img_size[1]
    # Crop height varies between 90% and 100% of the target height.
    min_crop_height = int(height * 0.9)
    return Compose([
        Resize(height=height, width=width),
        RandomRotate90(p=0.5),
        VerticalFlip(p=0.5),
        HorizontalFlip(p=0.5),
        Transpose(p=0.5),
        Rotate(limit=5, p=0.5),
        OneOf([
            GaussianBlur(blur_limit=3, p=0.4),
            Sharpen(alpha=(0.2, 0.3), p=0.3),
        ], p=1),
        RandomSizedCrop(min_max_height=(min_crop_height, height), height=height, width=width, p=0.5),
        RandomBrightnessContrast(brightness_limit=0.5, contrast_limit=0.5, p=0.8),
        Normalize(mean=mean, std=std, max_pixel_value=255.0, p=1.0),
        ToTensorV2(),
    ])

def multi_transforms_medium_4(img_size=(1024, 512), mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), padding=32):
    '''
    Medium augmentation pipeline #4: resize, random 90-degree rotations,
    flips, transpose, small rotation, CLAHE, strong color jitter
    (brightness/contrast, channel shuffle, RGB shift), RandomSizedCrop,
    Normalize, and conversion to a torch tensor.

    Args:
        img_size: (height, width) of the output.
        mean: per-channel normalization mean.
        std: per-channel normalization std.
        padding: unused; kept for signature compatibility with the other
            medium pipelines.

    Returns:
        An albumentations Compose pipeline.
    '''
    # NOTE: a previous revision recomputed `padding` from img_size here,
    # but the result was never used — dead code removed.
    return Compose([
        Resize(height=img_size[0], width=img_size[1]),
        RandomRotate90(p=0.5),
        VerticalFlip(p=0.5),
        HorizontalFlip(p=0.5),
        Transpose(p=0.5),
        Rotate(limit=5, p=0.75),
        CLAHE(p=1.0, clip_limit=4, tile_grid_size=(8, 8)),
        RandomBrightnessContrast(p=0.9, brightness_limit=0.3, contrast_limit=0.8),
        ChannelShuffle(p=0.5),
        RGBShift(p=0.5, r_shift_limit=50, g_shift_limit=50, b_shift_limit=50),
        RandomSizedCrop(min_max_height=(int(img_size[0] * 0.9), img_size[0]), height=img_size[0], width=img_size[1], p=0.5),
        Normalize(mean=mean, std=std, max_pixel_value=255.0, p=1.0),
        ToTensorV2(),
    ])
