import albumentations as albu
import albumentations.augmentations.transforms as transforms
from scipy import ndimage
import numpy as np
import torch.nn.functional as F
import torch
def resize(image,points,size=(368,512)):
    """Resize `image` (and its keypoints) to `size`, given as (height, width)."""
    height, width = size
    transformed = create_transformer([transforms.Resize(height, width)], image, points)
    return transformed['image'], transformed['keypoints']

def center_crop(image,points,height,width):
    """Center-crop `image` and its keypoints.

    Here `height` and `width` are the margins trimmed from each border
    (top/bottom and left/right respectively), not the output size.
    """
    rows, cols = image.shape[0], image.shape[1]
    crop = transforms.CenterCrop(height=rows - 2 * height, width=cols - 2 * width)
    transformed = create_transformer([crop], image, points)
    return transformed['image'], transformed['keypoints']


def tensor_resize(image_tensor,heatmap_tensor,train_size,p= 0.5):
    """With probability `p`, bilinearly rescale image and heatmap tensors
    to a jittered size near `train_size`.

    Both tensors are expected to be 4-D (N, C, H, W). The jitter is a
    multiple of 16 in {-32, -16, 0, +16} per spatial dimension.
    NOTE(review): np.random.randint(-2, 2) excludes the upper bound, so +32
    never occurs — confirm whether the range was meant to be inclusive.
    """
    if np.random.uniform() < p:
        a = 16*np.random.randint(-2,2)
        b = 16*np.random.randint(-2,2)
        scale_size = [train_size[0]+a,train_size[1]+b]
        # F.upsample is deprecated; F.interpolate is the drop-in replacement
        # with identical default semantics (align_corners=None).
        image_tensor = F.interpolate(image_tensor,size=scale_size,mode='bilinear')
        heatmap_tensor = F.interpolate(heatmap_tensor, size=scale_size, mode='bilinear')
    return image_tensor,heatmap_tensor
def random_crop(image,label,limit):
    """Crop a random margin (1 .. limit-1 px) off every border of `image`
    and shift the keypoints in `label` accordingly.

    Margins start at 1 because a 0 margin would turn the `:-0` slice into
    an empty result.
    """
    cut_top = np.random.randint(1, limit)
    cut_bottom = np.random.randint(1, limit)
    cut_left = np.random.randint(1, limit)
    cut_right = np.random.randint(1, limit)

    image = image[cut_top:-cut_bottom, cut_left:-cut_right]
    # NOTE(review): a first coordinate of exactly 40 is treated as a sentinel
    # for a missing/invalid point and zeroed out — confirm against the data
    # generation code that 40 is indeed the "absent point" marker.
    shifted = []
    for point in label:
        if point[0] == 40:
            shifted.append([0, 0])
        else:
            shifted.append([point[0] - cut_top, point[1] - cut_left])
    return image, shifted
    

def random_rotate_and_crop(image,heatmap,crop_limit = 60,angle_limit = 30,only_y = False):
    """Rotate image and heatmap by a random angle, then crop random margins.

    ndimage.rotate keeps the whole rotated content (reshape=True), so the
    canvas grows; the subsequent random crop brings the size back down.
    `image` is (H, W[, C]) and `heatmap` is (C, H, W) — both are rotated in
    their H-W plane. When `only_y` is set, only the width axis is cropped.
    The crop randomness also simulates the varying box sizes seen at
    prediction time.

    NOTE(review): randint excludes its upper bound, so the angle lies in
    [-angle_limit, angle_limit) and each margin in [1, crop_limit) —
    confirm whether the bounds were meant to be inclusive.
    """
    angle = np.random.randint(-angle_limit, angle_limit)
    image = ndimage.rotate(image, angle=angle, axes=(1, 0))
    heatmap = ndimage.rotate(heatmap, angle=angle, axes=(2, 1))

    # Margins start at 1: a 0 margin would make the ``:-0`` slice empty.
    # All four values are drawn unconditionally so the RNG stream does not
    # depend on `only_y`.
    top = np.random.randint(1, crop_limit)
    bottom = np.random.randint(1, crop_limit)
    left = np.random.randint(1, crop_limit)
    right = np.random.randint(1, crop_limit)
    if only_y:
        image = image[:, left:-right]
        heatmap = heatmap[:, :, left:-right]
    else:
        image = image[top:-bottom, left:-right]
        heatmap = heatmap[:, top:-bottom, left:-right]
    return image, heatmap

def single_random_rotate_and_crop(image,heatmap,crop_size,angle_limit = 15):
    """Rotate image and heatmap by a random angle, then cut a fixed-size
    window at a random position.

    ndimage.rotate enlarges the canvas (reshape=True); the crop restores a
    deterministic output size of `crop_size` = (height, width). `image` is
    (H, W[, C]) and `heatmap` is (C, H, W).
    NOTE(review): randint excludes the upper bound, so the angle lies in
    [-angle_limit, angle_limit).
    """
    angle = np.random.randint(-angle_limit, angle_limit)
    image = ndimage.rotate(image, angle=angle, axes=(1, 0))
    heatmap = ndimage.rotate(heatmap, angle=angle, axes=(2, 1))

    # Pick a random top-left corner such that the crop_size window fits
    # entirely inside the rotated canvas.
    rows, cols = image.shape[0], image.shape[1]
    win_h, win_w = crop_size[0], crop_size[1]
    x0 = np.random.randint(0, rows - win_h + 1)
    y0 = np.random.randint(0, cols - win_w + 1)
    image = image[x0:x0 + win_h, y0:y0 + win_w]
    heatmap = heatmap[:, x0:x0 + win_h, y0:y0 + win_w]
    return image, heatmap

def create_transformer(transformations,image,points):
    """Compose the given albumentations transforms (always applied, p=1)
    and run them on `image` and `points`, with keypoints in (y, x) order."""
    pipeline = albu.Compose(
        transformations,
        p=1,
        keypoint_params=albu.KeypointParams(format='yx'),
    )
    return pipeline(image=image, keypoints=points)



def data_aug(image,label):
    """Photometric / dropout training augmentation for an image-keypoint pair.

    Returns the augmented image and the (possibly flipped) keypoints.
    """
    pipeline = [
        transforms.CoarseDropout(max_holes=16, max_height=8, max_width=12),
        transforms.GaussNoise(p=0.9),
        transforms.GaussianBlur(p=0.9),
        transforms.RandomBrightnessContrast(p=0.99),
        transforms.Downscale(scale_min=0.65, scale_max=0.95, p=0.6),
        transforms.HorizontalFlip(p=0.5),
    ]
    augmented = create_transformer(pipeline, image, label)
    return augmented['image'], augmented['keypoints']

def test(image,label):
    """Run an empty transform pipeline (identity pass-through via the
    keypoint-aware composer) and return only the resulting image."""
    result = create_transformer([], image, label)
    return result['image']


def aug_predict(model,input):
    """Test-time augmentation: average the plain prediction with the
    horizontal-flip prediction (flip the input on dim 3, predict, un-flip
    the output so it aligns with the plain one)."""
    plain = model(input)
    flipped = torch.flip(model(torch.flip(input, [3])), [3])
    return (plain + flipped) / 2
    
    
   
def multi_model(input,model1,model2,use_aug):
    """Average the predictions of two models on the same input; when
    `use_aug` is true, each model's prediction uses flip TTA via
    aug_predict."""
    predict = aug_predict if use_aug else (lambda m, x: m(x))
    first = predict(model1, input)
    second = predict(model2, input)
    return (first + second) / 2
    
    

if __name__ == '__main__':
    # Smoke test: load a sample image from a hard-coded, machine-specific
    # Windows path and push it through the (empty) test() pipeline with two
    # dummy keypoints.
    # NOTE(review): cv2.imread returns None when the file is missing, which
    # would make test() fail downstream — guard or parameterize the path.
    import cv2
    image = cv2.imread('E:\共享文件夹\BraTs19素材\BraTs19论文可视化\BraTs19论文可视化\异常数据/train\BraTS19_TCIA10_109_1--WT_dice_0.95--TC_dice_0.71--ET_dice_0.01\可视化展示/shwo.png')
    image = test(image,[[3,2],[3,3]])
