# Creates a mosaic-like effect by masking out (cutting) regions of the image,
# with color-domain mixing helpers (CutMix / Mixup) included below.
# Drawback: small-object features may be degraded — a patch may happen to cut them out entirely.
import torch
import cv2
import time
import numpy as np
import torch
import torchvision.transforms as transforms
import torch.backends.cudnn as cudnn

class Cutout(object):
    """Randomly mask out one or more square patches from an image (Cutout augmentation).

    Args:
        n_holes (int): Number of patches to cut out of each image.
        length (int): Side length (in pixels) of each square patch.
    """

    def __init__(self, n_holes, length):
        self.n_holes = n_holes
        self.length = length

    def __call__(self, img):
        """Apply Cutout to a tensor image.

        Args:
            img (Tensor): Tensor image of size (C, H, W).

        Returns:
            Tensor: Image with n_holes patches of size length x length zeroed out.
        """
        h = img.size(1)  # image height
        w = img.size(2)  # image width

        mask = np.ones((h, w), np.float32)  # start with an all-keep mask

        for _ in range(self.n_holes):
            # Pick a random patch center; the patch is clipped at the image
            # borders, so patches near an edge may be smaller than length x length.
            y = np.random.randint(h)
            x = np.random.randint(w)

            y1 = np.clip(y - self.length // 2, 0, h)
            y2 = np.clip(y + self.length // 2, 0, h)
            x1 = np.clip(x - self.length // 2, 0, w)
            x2 = np.clip(x + self.length // 2, 0, w)

            mask[y1:y2, x1:x2] = 0.  # zero out this patch

        mask = torch.from_numpy(mask)
        # expand_as broadcasts the (H, W) mask across all channels of img.
        mask = mask.expand_as(img)
        img = img * mask
        return img

    @staticmethod
    def rand_bbox(size, lam):
        """Sample a random bounding box for CutMix.

        Args:
            size: Tensor size tuple (N, C, W, H); only the last two dims are used.
            lam (float): Mixing coefficient; the box covers ~(1 - lam) of the area.

        Returns:
            tuple: (bbx1, bby1, bbx2, bby2) box corners, clipped to the image bounds.
        """
        W = size[2]
        H = size[3]
        cut_rat = np.sqrt(1. - lam)
        # np.int was removed in NumPy 1.24; the builtin int is the correct spelling.
        cut_w = int(W * cut_rat)
        cut_h = int(H * cut_rat)

        # Uniformly sample the box center.
        cx = np.random.randint(W)
        cy = np.random.randint(H)

        bbx1 = np.clip(cx - cut_w // 2, 0, W)
        bby1 = np.clip(cy - cut_h // 2, 0, H)
        bbx2 = np.clip(cx + cut_w // 2, 0, W)
        bby2 = np.clip(cy + cut_h // 2, 0, H)

        return bbx1, bby1, bbx2, bby2

    @staticmethod
    def mixup_data(x, y, alpha=1.0, use_cuda=True):
        """Return mixed inputs, pairs of targets, and the mixing coefficient lambda.

        Args:
            x (Tensor): Batch of inputs, shape (N, ...).
            y (Tensor): Batch of targets, shape (N, ...).
            alpha (float): Beta-distribution parameter; alpha <= 0 disables mixing (lam = 1).
            use_cuda (bool): When True, draw the batch permutation on the GPU.

        Returns:
            tuple: (mixed_x, y_a, y_b, lam).
        """
        if alpha > 0:
            lam = np.random.beta(alpha, alpha)  # random lambda ~ Beta(alpha, alpha)
        else:
            lam = 1

        batch_size = x.size()[0]
        if use_cuda:
            index = torch.randperm(batch_size).cuda()  # random permutation of [0, batch_size)
        else:
            index = torch.randperm(batch_size)

        mixed_x = lam * x + (1 - lam) * x[index, :]
        y_a, y_b = y, y[index]
        return mixed_x, y_a, y_b, lam

if __name__ == "__main__":
    augmention = Cutout(20,30)#20是数量，5是cutout大小
    img = cv2.imread("test0.png")
    transfs = transforms.ToTensor()
    img_tensor = transfs(img)
    augmention(img_tensor)
    to_img = transforms.ToPILImage()
    img = to_img(augmention(img_tensor))
    now = time.strftime("%Y%m%d%H%M%S",time.localtime(time.time()))
    img.save(f'{now}.jpg')