#! image_augment.py
#! 图像增强方法
import math
import random

import albumentations as A
import cv2
import numpy as np
"""
    该文档中默认图片通道为 BGR
    函数中没有参数 return_percent_coords 的, bounding box 均为绝对位置, 而非相对位置
    所有函数中入参的 bouding box 默认均为绝对位置, 而非相对位置
"""

"""
    以下SSD300中使用过的图像增强方法
"""
def adjust_constrast_and_brightness(image, constrast, brightness):
    """
    Adjust image contrast and brightness.
    reference: https://blog.csdn.net/tywwwww/article/details/126626804

    Args:
        image: ndarray image; processed internally as float32.
        constrast: contrast control; 100 means "no change".
        brightness: brightness control; 100 means "no change".
    Returns:
        uint8 image with values clipped to [0, 255].
    """
    # Shift both controls so that 100 maps to 0 (neutral).
    c = constrast - 100
    b = brightness - 100

    if c > 0:
        delta = 127. * c / 100
        alpha = 255. / (255. - delta * 2)
        beta = alpha * (b - delta)
    else:
        delta = -128. * c / 100
        alpha = (256. - delta * 2) / 255.
        beta = alpha * b + delta

    pixels = image.astype(np.float32)
    mean = pixels.mean()
    # Scale around the global mean, then shift by beta.
    adjusted = alpha * (pixels - mean) + beta + mean
    return np.clip(adjusted, 0., 255.).astype(np.uint8)

def adjust_brightness(image, factor=1.0, *useless_parameters):
    """
    Adjust image brightness.

    Args:
        image: input image.
        factor: brightness factor in [0, 2]; 1.0 keeps the image unchanged.
        *useless_parameters: ignored, kept so all adjusters share one call shape.
    """
    return adjust_constrast_and_brightness(image, 100, 100 + (factor - 1.0) * 100)

def adjust_constrast(image, factor=1.0, *useless_parameters):
    """
    Adjust image contrast.

    Args:
        factor: factor in [0, 2]; 1.0 keeps the image unchanged.
        *useless_parameters: ignored, kept so all adjusters share one call shape.

    NOTE(review): brightness is driven by the same factor here — confirm this
    coupling is intentional.
    """
    level = 100 + (factor - 1.0) * 100
    return adjust_constrast_and_brightness(image, level, level)

def adjust_saturation(image, factor=1.0, bgr=True):
    """
    Adjust image saturation.

    Args:
        image: uint8 image, BGR channel order by default.
        factor: saturation factor, in theory [0, inf); 1.0 keeps the image unchanged.
        bgr: True for BGR channel order, False for RGB.
    Returns:
        uint8 image with channels in the same order as the input.

    Bug fix: for float32 input OpenCV's HSV saturation channel lies in [0, 1]
    (not [0, 255]), so the previous clip to 255 was a no-op and a factor > 1
    could push S out of range, wrapping colors after the uint8 cast.  S is now
    clipped to [0, 1].
    """
    new_image = image.copy().astype(np.float32)
    new_image = cv2.cvtColor(new_image, cv2.COLOR_BGR2HSV) \
        if bgr else cv2.cvtColor(new_image, cv2.COLOR_RGB2HSV)

    H = new_image[:,:,0]
    S = new_image[:,:,1] #! saturation channel
    V = new_image[:,:,2] #! value channel

    S *= factor
    S = np.clip(S, 0., 1.) #! float HSV: S is in [0, 1]

    new_image = np.dstack([H,S,V])
    new_image = cv2.cvtColor(new_image,cv2.COLOR_HSV2BGR).astype(np.uint8) \
        if bgr else cv2.cvtColor(new_image,cv2.COLOR_HSV2RGB).astype(np.uint8)

    return new_image

def adjust_hue(image, factor=0.0, bgr=True):
    """
        Adjust image hue; for float images OpenCV's hue channel spans [0, 360].
        factor: adjustment coefficient in [-1, 1].
        NOTE(review): hue is cyclic, so a modulo-360 wrap would normally be
        expected instead of np.clip; also the 255 multiplier (rather than 360)
        looks like a uint8-era constant — confirm both against callers.
    """
    new_image = image.copy().astype(np.float32)
    new_image = cv2.cvtColor(new_image,cv2.COLOR_BGR2HSV) \
        if bgr else cv2.cvtColor(new_image,cv2.COLOR_RGB2HSV)
    
    H = new_image[:,:,0]
    S = new_image[:,:,1] #! saturation channel
    V = new_image[:,:,2] #! value channel

    H += factor * 255
    H = np.clip(H,0.,360.)

    new_image = np.dstack([H,S,V])
    new_image = cv2.cvtColor(new_image,cv2.COLOR_HSV2BGR).astype(np.uint8) \
        if bgr else cv2.cvtColor(new_image,cv2.COLOR_HSV2RGB).astype(np.uint8)
    
    return new_image

def hist_equalize(image, clahe=True, bgr=True):
    """
    Histogram equalization: spreads the luminance distribution to increase
    contrast and bring out detail (used in YOLO).

    Args:
        image: uint8 image.
        clahe: if True use CLAHE (clip-limited adaptive) instead of plain
               global equalization.
        bgr: True for BGR channel order, False for RGB.
    """
    code_in  = cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV
    code_out = cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB

    # Equalize only the Y (luminance) channel.
    yuv = cv2.cvtColor(image, code_in)
    if clahe:
        yuv[:, :, 0] = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8)).apply(yuv[:, :, 0])
    else:
        yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0])

    return cv2.cvtColor(yuv, code_out)

def photometric_distort(image, bgr=True):
    """
        Photometric distortion: randomly applies brightness / contrast /
        saturation / hue adjustments, then (YOLO-style) random blur /
        median blur / grayscale / gamma / histogram equalization.
        Each step fires independently with probability 0.5.
    """
    new_image = image.copy()

    distortions = [adjust_brightness, #! brightness  (image, factor=1.0, *useless_parameters)
                   adjust_constrast,  #! contrast    (image, factor=1.0, *useless_parameters)
                   adjust_saturation, #! saturation  (image, factor=1.0, bgr=True)
                   adjust_hue         #! hue         (image, factor=0.0, bgr=True)
                ]                     #! the four above could also be done with albumentations

    random.shuffle(distortions)

    for distortion in distortions:
        if random.random() < 0.5:
            if distortion.__name__ == 'adjust_hue':
                adjust_factor = random.uniform(-18/255., 18/255.)
            else:
                adjust_factor = random.uniform(0.5, 1.5)

            new_image = distortion(new_image, adjust_factor, bgr)
    
    #! the following transforms are the ones used in YOLO
    p_factor = random.uniform(0.01,0.5)
    gamma_low  = random.uniform(20,80)
    gamma_high = random.uniform(80,120)

    transforms = [A.Blur(p=p_factor),       #! blur
                  A.MedianBlur(p=p_factor), #! median blur
                  A.ToGray(p=p_factor),     #! to grayscale
                  A.RandomGamma(gamma_limit=(gamma_low,gamma_high),p=p_factor), #! random gamma transform
                  hist_equalize,            #! histogram equalization (image, clahe=True, bgr=True)
                ]
    
    random.shuffle(transforms)

    for t in transforms:
        if random.random() < 0.5:
            try:
                name = t.__name__ #! plain functions expose __name__
            except:
                name = None       #! albumentations transform objects do not
            
            if name is not None:  #! i.e. hist_equalize
                new_image = t(new_image, bgr=bgr)
            else:
                out = t(image=new_image)
                new_image = out['image']
    
    return new_image

def expand(image, boxes, filler=[0.406, 0.456, 0.485], bgr=True):
    """
    Place the image on a larger canvas (zoom-out); padding uses `filler`.

    Args:
        image: normalized image with values in [0, 1].
        boxes: absolute-coordinate ndarray [x1, y1, x2, y2].
        filler: per-channel fill value; default is the ImageNet mean in BGR
                order ([0.485, 0.456, 0.406] would be the RGB order).
        bgr: channel order of `image`; filler is reversed for RGB input.
    Returns:
        (new_image, new_boxes) with boxes shifted by the paste offset.
    """
    original_h, original_w = image.shape[0], image.shape[1]
    scale = random.uniform(1, 4)  # canvas is 1x .. 4x the original size
    new_h, new_w = int(scale * original_h), int(scale * original_w)

    fill = filler if bgr else filler[::-1]
    canvas = np.ones(shape=(new_h, new_w, 3)) * np.array(fill)[None, None]

    # Random paste position inside the canvas.
    left = random.randint(0, new_w - original_w)
    top = random.randint(0, new_h - original_h)
    canvas[top: top + original_h, left: left + original_w, :] = image

    shifted_boxes = boxes + np.array([left, top, left, top])[None]

    return canvas, shifted_boxes

def expand_on_yolo(image, boxes, filler=[0.406, 0.456, 0.485], bgr=True):
    """
    `expand` for YOLO-style annotations: boxes come in (and go out) as
    normalized [cx, cy, w, h]; internally they are converted to absolute
    corner coordinates, expanded, and converted back.

    Args:
        image: normalized image with values in [0, 1].
        boxes: normalized ndarray [cx, cy, w, h] with values in [0, 1].
        filler: per-channel fill value (ImageNet mean, BGR order by default).
    """
    corner_boxes = cxcywh2xyxy(image, boxes)
    expanded_image, expanded_boxes = expand(image, corner_boxes, filler, bgr=bgr)
    return expanded_image, xyxy2cxcywh(expanded_image, expanded_boxes)

def xyxy2cxcywh(image, boxes):
    """
    Convert absolute corner boxes to normalized center boxes.

    Args:
        image: image whose (h, w) define the normalization.
        boxes: absolute [x1, y1, x2, y2].
    Returns:
        relative [cx, cy, w, h] with values in [0, 1].
    """
    h, w = image.shape[:2]
    centers = (boxes[:, :2] + boxes[:, 2:]) / 2
    sizes = boxes[:, 2:] - boxes[:, :2]
    return np.concatenate([centers, sizes], axis=-1) / np.array([w, h, w, h])

def cxcywh2xyxy(image, boxes):
    """
    Convert normalized [cx, cy, w, h] boxes to absolute [x1, y1, x2, y2]
    using the image's height/width.
    """
    h, w = image.shape[:2]
    scaled = boxes * np.array([w, h, w, h])
    half = scaled[:, 2:] / 2
    return np.concatenate([scaled[:, :2] - half, scaled[:, :2] + half], axis=-1)

def xyxy2xyxyxyxy(boxes):
    """
    Expand [x1, y1, x2, y2] boxes to the 4-corner form
    [x1, y1, x2, y1, x2, y2, x1, y2] (clockwise from the top-left corner).
    """
    x1, y1, x3, y3 = np.split(boxes, 4, axis=-1)
    return np.concatenate([x1, y1, x3, y1, x3, y3, x1, y3], axis=-1)

def xyxyxyxy2xyxy(boxes, lefttop=0):
    """
    Collapse 4-corner boxes [x1,y1,x2,y2,x3,y3,x4,y4] back to [x1, y1, x3, y3].

    `lefttop` tells which corner is the current top-left after a clockwise
    rotation (0: none, 1: rotated 270°, 2: rotated 180°, other: rotated 90°);
    the matching pair of opposite corners is picked so the output is again
    (top-left, bottom-right).
    """
    # Column offset of the top-left (tl) and bottom-right (br) x coordinate.
    if lefttop == 0:
        tl, br = 0, 4  # no rotation
    elif lefttop == 1:
        tl, br = 2, 6  # rotated 270 degrees clockwise
    elif lefttop == 2:
        tl, br = 4, 0  # rotated 180 degrees clockwise
    else:
        tl, br = 6, 2  # rotated 90 degrees clockwise
    return np.concatenate([boxes[..., tl:tl+2], boxes[..., br:br+2]], axis=-1)

def find_intersection(set_1, set_2):
    """
    Pairwise intersection areas between two sets of `x1,y1,x2,y2` boxes.

    Args:
        set_1: (N1, 4), set_2: (N2, 4)
    Returns:
        (N1, N2) matrix of intersection areas.
    """
    top_left     = np.maximum(set_1[:, None, :2], set_2[None, :, :2])
    bottom_right = np.minimum(set_1[:, None, 2:], set_2[None, :, 2:])
    wh = np.clip(bottom_right - top_left, 0, None)  # no overlap -> 0 width/height
    return wh[..., 0] * wh[..., 1]

def find_jaccard_overlap(set_1, set_2):
    """
    Pairwise IoU (Jaccard index) between two sets of `x1,y1,x2,y2` boxes.

    Args:
        set_1: (N1, 4), set_2: (N2, 4)
    Returns:
        (N1, N2) IoU matrix.
    """
    intersection = find_intersection(set_1, set_2)

    # Per-box areas: width * height.
    wh_1 = set_1[..., 2:] - set_1[..., :2]
    wh_2 = set_2[..., 2:] - set_2[..., :2]
    areas_1 = wh_1[..., 0] * wh_1[..., 1]
    areas_2 = wh_2[..., 0] * wh_2[..., 1]

    union = areas_1[..., None] + areas_2[None] - intersection
    return intersection / union

def random_crop(image, boxes, labels, difficulties):
    """
        Random crop; a target is kept iff its center point lies inside the
        cropped region.
        boxes: absolute-coordinate ndarray [x1, y1, x2, y2]
        Returns (new_image, new_boxes, new_labels, new_difficulties).
        NOTE(review): loops until a valid crop (or None = "no crop") is drawn,
        so in pathological cases this can iterate for a while.
    """
    original_h = image.shape[0]
    original_w = image.shape[1]

    while True:
        # min_overlap = random.choice([0., 0.1, 0.3, 0.5, 0.7, 0.9, None])
        min_overlap = random.choice([0.3, 0.5, 0.7, None])  # None -> return the input unchanged

        if min_overlap is None:
            return image, boxes, labels, difficulties

        max_trials = 50
        for _ in range(max_trials):
            min_scale = 0.3
            scale_h = random.uniform(min_scale,1)
            scale_w = random.uniform(min_scale,1)
            new_h = int(scale_h * original_h)
            new_w = int(scale_w * original_w)

            aspect_ratio = new_h / new_w
            if not 0.5 < aspect_ratio < 2:  # keep the crop reasonably square
                continue
            
            # Random crop window inside the image.
            left = random.randint(0, original_w - new_w)
            right = left + new_w
            top = random.randint(0, original_h - new_h)
            bottom = top + new_h

            crop = np.array([left, top, right, bottom],dtype=np.float32)

            overlap = find_jaccard_overlap(crop[None], boxes)[0] #! IoU of the crop window with every box

            if overlap.max() < min_overlap:  # the crop must overlap at least one box enough
                continue

            new_image = image[top: bottom, left: right, :]

            # Keep only targets whose center falls strictly inside the crop.
            bb_centers = (boxes[:,:2] + boxes[:,2:]) / 2.
            centers_in_crop = (bb_centers[:,0] > left) * (bb_centers[:,0] < right) * (bb_centers[:,1] > top) * (bb_centers[:,1] < bottom)

            if not centers_in_crop.any():
                continue
            
            new_boxes  = boxes[centers_in_crop]
            new_labels = labels[centers_in_crop]
            new_difficulties = difficulties[centers_in_crop]

            # Clamp surviving boxes to the window, then shift into crop coordinates.
            new_boxes[:,:2] = np.maximum(new_boxes[:,:2], crop[:2])
            new_boxes[:,:2] -= crop[:2]
            new_boxes[:,2:] = np.minimum(new_boxes[:,2:], crop[2:])
            new_boxes[:,2:] -= crop[:2]

            return new_image, new_boxes, new_labels, new_difficulties
        
def random_crop_on_yolo(image, boxes, labels, difficulties):
    """
    `random_crop` for YOLO-style annotations (normalized [cx, cy, w, h]):
    convert to absolute corner coordinates, crop, convert back.
    Targets survive only if their center stays inside the crop.
    """
    corner_boxes = cxcywh2xyxy(image, boxes)
    cropped_image, cropped_boxes, kept_labels, kept_difficulties = \
        random_crop(image, corner_boxes, labels, difficulties)
    return cropped_image, xyxy2cxcywh(cropped_image, cropped_boxes), kept_labels, kept_difficulties

def size_crop(image, boxes, labels, difficulties, new_shape):
    """
    Crop a random window of fixed size `new_shape`; keep only the targets
    whose center point falls inside the window.

    Args:
        boxes: absolute-coordinate ndarray [x1, y1, x2, y2].
        new_shape: (w, h) of the crop; must not exceed the image size.
    Returns:
        (new_image, new_boxes, new_labels, new_difficulties)
    """
    original_h, original_w = image.shape[0], image.shape[1]
    new_w, new_h = new_shape

    # Random window position inside the image.
    left = random.randint(0, original_w - new_w)
    top = random.randint(0, original_h - new_h)
    right, bottom = left + new_w, top + new_h
    crop = np.array([left, top, right, bottom], dtype=np.float32)

    new_image = image[top: bottom, left: right, :]

    # A target survives iff its center lies strictly inside the window.
    centers = (boxes[:, :2] + boxes[:, 2:]) / 2.
    keep = (centers[:, 0] > left) * (centers[:, 0] < right) \
         * (centers[:, 1] > top) * (centers[:, 1] < bottom)

    new_boxes  = boxes[keep]
    new_labels = labels[keep]
    new_difficulties = difficulties[keep]

    if not keep.any():  # nothing left inside the crop
        return new_image, new_boxes, new_labels, new_difficulties

    # Clamp surviving boxes to the window, then shift into window coordinates.
    new_boxes[:, :2] = np.maximum(new_boxes[:, :2], crop[:2])
    new_boxes[:, :2] -= crop[:2]
    new_boxes[:, 2:] = np.minimum(new_boxes[:, 2:], crop[2:])
    new_boxes[:, 2:] -= crop[:2]

    return new_image, new_boxes, new_labels, new_difficulties

def size_crop_on_yolo(image, boxes, labels, difficulties, new_shape):
    """
    `size_crop` for YOLO-style annotations (normalized [cx, cy, w, h]):
    crop a fixed-size (w, h) window, keeping targets whose center stays
    inside the window.
    """
    corner_boxes = cxcywh2xyxy(image, boxes)
    cropped_image, cropped_boxes, kept_labels, kept_difficulties = \
        size_crop(image, corner_boxes, labels, difficulties, new_shape)
    return cropped_image, xyxy2cxcywh(cropped_image, cropped_boxes), kept_labels, kept_difficulties

def flip(image, boxes):
    """
    Horizontal flip of the image and its absolute [x1, y1, x2, y2] boxes.
    """
    w = image.shape[1]
    flipped = cv2.flip(image, 1)  # flip around the vertical axis

    # Mirror the x coordinates, then swap x1/x2 so that x1 <= x2 again.
    mirrored = boxes.copy()
    mirrored[:, 0] = w - boxes[:, 0]
    mirrored[:, 2] = w - boxes[:, 2]
    mirrored = mirrored[:, [2, 1, 0, 3]]

    return flipped, mirrored

def flip_on_yolo(image, boxes):
    """
    Horizontal flip for YOLO-style annotations (normalized [cx, cy, w, h]).
    """
    corner_boxes = cxcywh2xyxy(image, boxes)
    flipped_image, flipped_boxes = flip(image, corner_boxes)
    return flipped_image, xyxy2cxcywh(flipped_image, flipped_boxes)

def resize(image, boxes, new_wh, return_percent_coords=True):
    """
    Resize (stretch) the image to `new_wh` and rescale the boxes.

    Args:
        boxes: absolute [x1, y1, x2, y2].
        new_wh: (w, h) target size.
        return_percent_coords: if True, boxes are returned as fractions of the
            new image; otherwise as absolute coordinates in the resized image.
    """
    new_image = cv2.resize(image, new_wh)

    h, w = image.shape[0], image.shape[1]
    new_boxes = boxes / np.array([w, h, w, h])[None]  # -> relative coordinates

    if not return_percent_coords:
        nh, nw = new_image.shape[0], new_image.shape[1]
        new_boxes = new_boxes * np.array([nw, nh, nw, nh])[None]

    return new_image, new_boxes

def resize_on_yolo(image, boxes, new_wh, return_percent_coords=True):
    """
    `resize` for YOLO-style annotations (normalized [cx, cy, w, h]).
    If return_percent_coords is False the boxes are returned in absolute
    coordinates of the resized image.
    """
    corner_boxes = cxcywh2xyxy(image, boxes)
    resized_image, resized_boxes = resize(image, corner_boxes, new_wh, return_percent_coords=False)

    out_boxes = xyxy2cxcywh(resized_image, resized_boxes)
    if not return_percent_coords:
        out_boxes *= np.array([new_wh[0], new_wh[1], new_wh[0], new_wh[1]])

    return resized_image, out_boxes

def normalize(image, mean=[0.406, 0.456, 0.485], std=[0.225, 0.224, 0.229], bgr=True):
    """
    Standardize the image: (image - mean) / std per channel.
    Defaults are the ImageNet statistics in BGR order
    (RGB order would be [0.485, 0.456, 0.406] / [0.229, 0.224, 0.225]).
    """
    if not bgr:  # flip the channel statistics for RGB input
        mean, std = mean[::-1], std[::-1]

    m = np.array(mean)[None, None]
    s = np.array(std)[None, None]
    return (image.copy() - m) / s

def denormalize(image, mean=[0.406, 0.456, 0.485], std=[0.225, 0.224, 0.229], bgr=True):
    """
    Undo `normalize`: image * std + mean per channel.
    Defaults are the ImageNet statistics in BGR order
    (RGB order would be [0.485, 0.456, 0.406] / [0.229, 0.224, 0.225]).
    """
    if not bgr:  # flip the channel statistics for RGB input
        mean, std = mean[::-1], std[::-1]

    m = np.array(mean)[None, None]
    s = np.array(std)[None, None]
    return image.copy() * s + m

def transform(image, boxes, labels, difficulties, split, new_wh, bgr=True, return_percent_coords=True):
    """
        Full augmentation pipeline combining the transforms above.
        Assumes BGR channel order by default
        (in RGB the statistics would be [0.485, 0.456, 0.406] / [0.229, 0.224, 0.225]).
        split: 'TRAIN' applies random augmentation; 'TEST' only resizes.
        return: new_boxes in relative (fractional) coordinates by default.
    """
    assert split in {'TRAIN','TEST'}

    mean = [0.406, 0.456, 0.485]
    std  = [0.225, 0.224, 0.229]
    mean = mean if bgr else mean[::-1]
    std  = std  if bgr else std [::-1]

    new_image  = image.copy()
    new_boxes  = boxes.copy()
    new_labels = labels.copy()
    new_difficulties = difficulties.copy()

    if split == 'TRAIN':

        new_image = photometric_distort(new_image, bgr=bgr) #! applied to the raw image

        new_image = (new_image).astype(np.float32) / 255. #! to the [0, 1] range

        if random.random() < 0.5: #! zoom-out onto a larger canvas
            new_image, new_boxes = expand(new_image, new_boxes, filler=mean, bgr=bgr)

        if random.random() < 0.5: #! aspect-ratio distortion; since training uses letterbox, real inputs are likely distorted too
            new_image, new_boxes = random_distort(new_image, new_boxes)

        #! random crop
        new_image, new_boxes, new_labels, new_difficulties = random_crop(new_image, new_boxes, new_labels, new_difficulties)

        new_image = (new_image * 255.).astype(np.uint8) #! back to [0, 255] so TRAIN matches the TEST value range

        if random.random() < 0.5: #! horizontal flip
            new_image, new_boxes = flip(new_image, new_boxes)

        if random.random() < 0.5: #! random 90/180/270 rotation
            new_image, new_boxes = random_rotate(new_image, new_boxes)
    
        new_image, new_boxes = letterbox(new_image, new_boxes, new_wh, return_percent_coords=return_percent_coords) #! aspect-ratio-preserving resize
    
    else: #! TEST: no letterbox
        
        new_image, new_boxes = resize(new_image, new_boxes, new_wh, return_percent_coords=return_percent_coords) #! plain resize

    new_image = new_image.astype(np.float32) / 255. #! normalize to [0, 1]

    new_image = normalize(new_image, mean, std) #! standardize

    return new_image, new_boxes, new_labels, new_difficulties

def transform_on_yolo(image, boxes, labels, difficulties, split, new_wh, bgr=True, return_percent_coords=True):
    """
    `transform` for YOLO-style annotations (normalized [cx, cy, w, h]).
    """
    corner_boxes = cxcywh2xyxy(image, boxes)

    new_image, new_boxes, new_labels, new_difficulties = transform(
        image, corner_boxes, labels, difficulties, split, new_wh, bgr=bgr, return_percent_coords=False)

    out_boxes = xyxy2cxcywh(new_image, new_boxes)
    if not return_percent_coords:
        out_boxes *= np.array([new_wh[0], new_wh[1], new_wh[0], new_wh[1]])

    return new_image, out_boxes, new_labels, new_difficulties

"""
    以下为YOLO中使用过的图像增强
"""
def letterbox(image, boxes, new_wh, filler=(0.406, 0.456, 0.485), bgr=True, return_percent_coords=True):
    """
        Aspect-ratio-preserving resize into the largest rectangle that fits
        inside new_wh; the remaining border is filled with `filler`.
        boxes: absolute-coordinate ndarray [x1, y1, x2, y2]
        new_wh: (w, h), or a single int for a square canvas
        filler: the input image is assumed normalized, hence ImageNet mean;
                for an un-normalized image use 0-255 values, e.g. (104, 116, 124)
        return: boxes in absolute coordinates, or fractions of the output
                canvas when return_percent_coords is True.

        Bug fix: percent coordinates were previously divided by the resized
        content size (new_w, new_h) instead of the output canvas size new_wh,
        producing values > 1 for boxes near the padded borders.
    """
    original_h, original_w = image.shape[:2]
    if isinstance(new_wh, int):
        new_wh = (new_wh, new_wh)

    r = min(new_wh[0] / original_w, new_wh[1] / original_h)

    scaleup = random.random() < 0.5 #! randomly decide whether the image may be scaled up

    if scaleup: #! fill the canvas, or limit the up-scaling ratio
        if r > 1.1: #! image smaller than the canvas and the up-scale ratio exceeds 1.1
            if random.random() < 0.5: #! randomly cap the scale ratio
                c = random.choice([0.9, 1.0, 1.1, None])
                if c is not None:
                    r = c

    new_w = int(original_w * r)
    new_h = int(original_h * r)

    resize_image = cv2.resize(image, (new_w, new_h))

    filler = filler if bgr else filler[::-1]
    new_image = np.ones(shape=(*new_wh[::-1], 3)) * np.array(filler)[None,None]

    # Center the resized content on the canvas.
    left   = (new_wh[0] - new_w) // 2
    right  = left + new_w
    top    = (new_wh[1] - new_h) // 2
    bottom = top + new_h

    new_image[top: bottom, left: right, :] = resize_image

    resize_boxes = boxes * r #! boxes scaled with the image

    new_boxes = resize_boxes + np.array([left, top, left, top])[None] #! shifted by the padding offset

    if return_percent_coords: #! normalize by the padded canvas size, not the content size
        new_boxes /= np.array([new_wh[0], new_wh[1], new_wh[0], new_wh[1]])

    return new_image, new_boxes

def letterbox_on_yolo(image, boxes, new_wh, filler=(0.406, 0.456, 0.485), bgr=True, return_percent_coords=True):
    """
    `letterbox` for YOLO-style annotations (normalized [cx, cy, w, h]):
    aspect-ratio-preserving resize into a new_wh canvas, padded with `filler`.
    filler: ImageNet mean for a normalized input image; use 0-255 values,
    e.g. (104, 116, 124), for an un-normalized image.
    """
    corner_boxes = cxcywh2xyxy(image, boxes)

    boxed_image, boxed_boxes = letterbox(image, corner_boxes, new_wh,
                                         filler=filler, bgr=bgr, return_percent_coords=False)

    out_boxes = xyxy2cxcywh(boxed_image, boxed_boxes)
    if not return_percent_coords:
        out_boxes *= np.array([new_wh[0], new_wh[1], new_wh[0], new_wh[1]])

    return boxed_image, out_boxes

def random_rotate(image, boxes):
    """
        Rotate the image clockwise by a random choice of 90/180/270 degrees
        and transform the absolute [x1, y1, x2, y2] boxes to match.

        Fix: `np.math` (an alias of the stdlib `math` module) was deprecated
        and removed in NumPy 2.0; this now uses `math` directly.
    """
    angle = random.choice([cv2.ROTATE_90_CLOCKWISE,         #! clockwise  90 degrees
                           cv2.ROTATE_180,                  #! clockwise 180 degrees
                           cv2.ROTATE_90_COUNTERCLOCKWISE]) #! clockwise 270 degrees
    
    #! rotate the image itself
    new_images = cv2.rotate(image, angle)

    #! transform the coordinates
    center_xy = np.array([image.shape[1] / 2, image.shape[0] / 2]) #! (w/2, h/2) center of the original image
    
    angle = 90 if angle==cv2.ROTATE_90_CLOCKWISE else (180 if angle==cv2.ROTATE_180 else 270) #! rotation angle in degrees

    lefttop = 3 if angle==90 else (2 if angle==180 else 1)
    #! index of the corner that becomes the new top-left after rotation
    #! number the corners clockwise from the original top-left: 0, 1, 2, 3
    #! after rotation the corner order (in original indices) becomes:
    #!  90: 3, 0, 1, 2
    #! 180: 2, 3, 0, 1
    #! 270: 1, 2, 3, 0
    #! the order matters: it determines how the rotated coords map back to [x1,y1,x3,y3]
    
    rotated_center_xy = center_xy if angle == 180 else np.array([image.shape[0] / 2, image.shape[1] / 2]) #! center of the rotated image (w/h swap except 180)

    radian = math.pi * angle / 180 #! degrees -> radians

    M = np.array([[ math.cos(radian), math.sin(radian)],  #! clockwise rotation matrix; swap the off-diagonal signs for counter-clockwise
                  [-math.sin(radian), math.cos(radian)]])
    
    new_boxes = xyxy2xyxyxyxy(boxes)
    new_boxes = new_boxes.reshape([-1,4,2]) - center_xy[None, None]
    new_boxes = (new_boxes @ M + rotated_center_xy[None, None]).reshape([-1,8])
    new_boxes = xyxyxyxy2xyxy(new_boxes, lefttop=lefttop)

    return new_images, new_boxes

def random_rotate_on_yolo(image, boxes):
    """
    Random 90/180/270-degree clockwise rotation for YOLO-style annotations
    (normalized [cx, cy, w, h]).
    """
    corner_boxes = cxcywh2xyxy(image, boxes)
    rotated_image, rotated_boxes = random_rotate(image, corner_boxes)
    return rotated_image, xyxy2cxcywh(rotated_image, rotated_boxes)

def random_distort(image, boxes):
    """
    Randomly stretch the image (independent width/height scaling in
    [0.7, 1.3]), distorting its aspect ratio; boxes are scaled to match.
    """
    h, w = image.shape[:2]
    h_ratio = random.uniform(0.7, 1.3)  # new height / old height
    w_ratio = random.uniform(0.7, 1.3)  # new width  / old width

    stretched = cv2.resize(image, (int(w * w_ratio), int(h * h_ratio)))
    scaled_boxes = boxes * np.array([w_ratio, h_ratio, w_ratio, h_ratio])

    return stretched, scaled_boxes

def random_distort_on_yolo(image, boxes):
    """
    Random aspect-ratio distortion for YOLO-style annotations
    (normalized [cx, cy, w, h]).
    """
    corner_boxes = cxcywh2xyxy(image, boxes)
    distorted_image, distorted_boxes = random_distort(image, corner_boxes)
    return distorted_image, xyxy2cxcywh(distorted_image, distorted_boxes)

def mixup(image_list, boxes_list, labels_list, difficulties_list, new_wh, crop=False, filler=(0.406, 0.456, 0.485), bgr=True, return_percent_coords=True):
    """
        Mosaic-style mixup: pick 4 different images from the list and stitch
        them into one new_wh-sized image around a random crossing point.
        Two stitching modes:
        1. crop=False: stretch each source image to its quadrant size;
        2. crop=True: size-crop each source image to its quadrant (letterboxed
           up first when it is smaller than the quadrant).
        new_wh: (w, h)
        NOTE(review): the four input lists are consumed (popped) — callers
        must not reuse them afterwards.
    """
    w, h = new_wh
    filler = filler if bgr else filler[::-1]
    new_image = np.ones((h, w, 3)) * np.array(filler)[None, None]
    new_boxes = []
    new_labels = []
    new_difficulties = []

    # Random crossing point splits the canvas into 4 quadrants.
    cross_x = int(random.uniform(w * 0.3, w * 0.7))
    cross_y = int(random.uniform(h * 0.3, h * 0.7))
    new_shapes = [ #! quadrant sizes, (w, h)
        (  cross_x,   cross_y),
        (w-cross_x,   cross_y),
        (  cross_x, h-cross_y),
        (w-cross_x, h-cross_y)]
    left   = [0, cross_x, 0, cross_x]
    right  = [cross_x, w, cross_x, w]
    top    = [0, 0, cross_y, cross_y]
    bottom = [cross_y, cross_y, h, h]

    for i in range(4):

        indices = list(range(len(image_list)))   #! candidate indices
        indice = random.choice(indices)          #! pick a random index
        image = image_list[indice]               #! its image
        boxes = boxes_list[indice]               #! its boxes
        labels = labels_list[indice]             #! its labels
        difficulties = difficulties_list[indice] #! its difficulties
        image_list.pop(indice)                   #! remove the chosen image
        boxes_list.pop(indice)                   #! remove the chosen boxes
        labels_list.pop(indice)                  #! remove the chosen labels
        difficulties_list.pop(indice)            #! remove the chosen difficulties

        if not crop: #! default: stretch-and-stitch

            resized_image, resized_boxes = resize(image, boxes, new_shapes[i], return_percent_coords=False) #! boxes are quadrant-local and must be translated below
            resized_image = normalize(resized_image.astype(np.float32) / 255.)
            new_image[top[i]: bottom[i], left[i]: right[i], :] = resized_image
            resized_boxes = resized_boxes + np.array([left[i], top[i], left[i], top[i]])[None] #! translate into mosaic coordinates

            new_boxes.append(resized_boxes)
            new_labels.append(labels)
            new_difficulties.append(difficulties)
        
        else: #! crop-and-stitch; falls back to letterbox upscaling when the image is smaller than the quadrant

            if image.shape[1] < new_shapes[i][0] or image.shape[0] < new_shapes[i][1]: #! either side too small -> letterbox up first

                max_shape = max(image.shape[1], new_shapes[i][0], image.shape[0], new_shapes[i][1])

                image, boxes = letterbox(image, boxes, (max_shape, max_shape), return_percent_coords=False) #! returns absolute coords

            cropped_image, cropped_boxes, cropped_labels, cropped_difficulties = size_crop(image, boxes, labels, difficulties, new_shapes[i]) #! returns absolute coords
            cropped_image = normalize(cropped_image.astype(np.float32) / 255.)
            new_image[top[i]: bottom[i], left[i]: right[i], :] = cropped_image
            cropped_boxes = cropped_boxes + np.array([left[i], top[i], left[i], top[i]])[None] #! translate into mosaic coordinates

            new_boxes.append(cropped_boxes)
            new_labels.append(cropped_labels)
            new_difficulties.append(cropped_difficulties)

    new_boxes = np.concatenate(new_boxes, axis=0)
    new_labels = np.concatenate(new_labels, axis=0)
    new_difficulties = np.concatenate(new_difficulties, axis=0)

    if return_percent_coords: new_boxes /= np.array([w, h, w, h])
    
    return new_image, new_boxes, new_labels, new_difficulties

def mixup_on_yolo(image_list, boxes_list, labels_list, difficulties_list, new_wh, crop=False, filler=(0.406, 0.456, 0.485), bgr=True, return_percent_coords=True):
    """
    `mixup` (4-image mosaic) for YOLO-style annotations
    (normalized [cx, cy, w, h]).
    Note: like `mixup`, this consumes (pops) entries from the input lists.
    """
    corner_boxes_list = [cxcywh2xyxy(img, bxs) for img, bxs in zip(image_list, boxes_list)]

    mosaic_image, mosaic_boxes, mosaic_labels, mosaic_difficulties = mixup(
        image_list, corner_boxes_list, labels_list, difficulties_list,
        new_wh=new_wh, crop=crop, filler=filler, bgr=bgr, return_percent_coords=False)

    out_boxes = xyxy2cxcywh(mosaic_image, mosaic_boxes)
    if not return_percent_coords:
        out_boxes *= np.array([new_wh[0], new_wh[1], new_wh[0], new_wh[1]])

    return mosaic_image, out_boxes, mosaic_labels, mosaic_difficulties