import albumentations as albu
import albumentations.augmentations.transforms as transforms
from scipy import ndimage
import numpy as np
import torch.nn.functional as F

def resize(image, points, size=(368, 512)):
    """Resize ``image`` to ``size`` (height, width) and transform ``points``.

    Uses an albumentations pipeline so the ``xy`` keypoints are rescaled
    consistently with the image.

    Returns the resized image and the transformed keypoints.
    """
    height, width = size
    result = create_transformer([transforms.Resize(height, width)], image, points)
    return result['image'], result['keypoints']

def tensor_resize(image_tensor,heatmap_tensor,train_size,p= 0.5):
    if np.random.uniform() < p:
        a = 32*np.random.randint(-3,3)
        b = 32*np.random.randint(-4,4)
        scale_size = [train_size[0]+a,train_size[1]+b]
        image_tensor = F.upsample(image_tensor,size=scale_size,mode='bicubic')
        heatmap_tensor = F.upsample(heatmap_tensor, size=scale_size, mode='bicubic')
    return image_tensor,heatmap_tensor


def random_rotate_and_crop(image,heatmap=None,crop_limit=20,p = 1,angle_limit = 15):
    """Randomly rotate, crop, and rescale an image (and optional heatmap).

    With probability ``p``: rotate by a random angle in
    ``[-angle_limit, angle_limit)``, crop 1..crop_limit-1 pixels from each
    border, then zoom back to the original spatial size.

    Args:
        image: H x W x C array (the trailing ``:`` slice on the crop implies
            a channel axis).
        heatmap: optional array rotated/cropped with the same parameters;
            indexing suggests channel-first (C, H, W) layout — TODO confirm.
        crop_limit: exclusive upper bound on pixels cropped per border.
        p: probability of applying the augmentation.
        angle_limit: rotation angle bound in degrees.

    Returns:
        ``(image, heatmap)`` if ``heatmap`` is given, else ``image``.
    """
    if np.random.uniform() < p:
        ## Inputs are the image and heatmap. Note: scipy's rotate enlarges
        ## the canvas to avoid losing image content, so after rotating we
        ## randomly crop and then zoom back to the original size.
        ## Rotation
        ori_size = image.shape
        angle = np.random.randint(0-angle_limit,angle_limit)
        image  = ndimage.rotate(image,angle=angle,axes=(1,0))

        ## The rotated image is always larger, so crop it. To keep the
        ## prediction-time crop size consistent with training, crop
        ## relative to the (enlarged) image size.
        random_cut_x_1 = np.random.randint(1,crop_limit)
        random_cut_x_2 = np.random.randint(1,crop_limit)
        random_cut_y_1 = np.random.randint(1,crop_limit)
        random_cut_y_2 = np.random.randint(1,crop_limit)

        image = image[random_cut_x_1:-random_cut_x_2,random_cut_y_1:-random_cut_y_2,:]
        new_size = image.shape
        # Zoom back to the pre-rotation spatial size; order=2 for the image.
        image = ndimage.zoom(image, (ori_size[0] / new_size[0], ori_size[1] / new_size[1], 1), order=2)
        if heatmap is not None:
            # Same rotation, but about axes (2, 1) — consistent with a
            # channel-first heatmap; crop and zoom reuse the image's offsets
            # so image and heatmap stay spatially aligned.
            heatmap = ndimage.rotate(heatmap, angle=angle, axes=(2, 1))
            heatmap = heatmap[:, random_cut_x_1:-random_cut_x_2,random_cut_y_1:-random_cut_y_2]
            # order=1 (bilinear) for heatmaps to avoid overshoot artifacts.
            heatmap = ndimage.zoom(heatmap,(1,ori_size[0]/new_size[0],ori_size[1]/new_size[1]),order=1)
    if heatmap is not None:
        return image,heatmap
    else:
        return image

def create_transformer(transformations, image, points):
    """Compose the given albumentations transforms and apply them.

    The pipeline always runs (``p=1``) and propagates ``points`` as
    ``xy``-format keypoints alongside the image.
    """
    pipeline = albu.Compose(
        transformations,
        p=1,
        keypoint_params=albu.KeypointParams(format='xy'),
    )
    return pipeline(image=image, keypoints=points)



def data_aug(image, label):
    """Apply the training-time photometric augmentation pipeline.

    Keypoint-safe transforms only (dropout, noise, blur, brightness/
    contrast, downscale, horizontal flip); keypoints in ``label`` are
    transformed alongside the image.

    Returns the augmented image and the transformed keypoints.
    """
    pipeline = [
        transforms.CoarseDropout(max_holes=15, max_height=8, max_width=10),
        transforms.GaussNoise(p=0.8),
        transforms.GaussianBlur(p=0.8),
        transforms.RandomBrightnessContrast(p=1),
        transforms.Downscale(scale_min=0.5, scale_max=0.9, p=0.7),
        transforms.HorizontalFlip(p=0.5),
    ]
    data = create_transformer(pipeline, image, label)
    return data['image'], data['keypoints']

def test(image, label):
    """Run ``image``/``label`` through an empty (identity) pipeline.

    Useful as a smoke test of the albumentations plumbing; returns the
    image unchanged apart from pipeline bookkeeping.
    """
    result = create_transformer([], image, label)
    return result['image']

if __name__ == '__main__':
    # Ad-hoc smoke test: load a local sample image and run it through the
    # identity pipeline with two dummy keypoints.
    import cv2
    # NOTE(review): hard-coded absolute Windows path (machine-specific);
    # cv2.imread returns None if the file is missing — not checked here.
    image = cv2.imread('E:\共享文件夹\BraTs19素材\BraTs19论文可视化\BraTs19论文可视化\异常数据/train\BraTS19_TCIA10_109_1--WT_dice_0.95--TC_dice_0.71--ET_dice_0.01\可视化展示/shwo.png')
    image = test(image,[[3,2],[3,3]])
