import cv2
import random
import numpy as np
import xml.etree.ElementTree as ET
#! project-local helper module (provides find_jaccard_overlap)
from metric_utils import find_jaccard_overlap

def parse_annotation(annotation_path, label_map):
    """
        Parse a PASCAL-VOC style XML annotation file.

        annotation_path: path to the .xml file
        label_map      : dict mapping lowercase class name -> integer label

        Returns a dict with keys:
            'boxes'       : list of [xmin, ymin, xmax, ymax] absolute pixel coords
            'labels'      : list of mapped integer labels
            'difficulties': list of 0/1 flags from the VOC 'difficult' field
        Objects whose class name is not in label_map are skipped.
    """
    tree = ET.parse(annotation_path)
    root = tree.getroot()

    boxes  = list()
    labels = list()
    difficulties = list()

    #! 'obj' instead of 'object' so the builtin name is not shadowed
    for obj in root.iter('object'):

        difficult = int(obj.find('difficult').text=='1')

        label = obj.find('name').text.lower().strip()
        if label not in label_map:
            continue

        bbox = obj.find('bndbox')
        #! int(float(...)) also accepts coordinates written as floats (e.g. "12.0"),
        #! which appear in some VOC-format datasets; plain int(...) would raise
        xmin = int(float(bbox.find('xmin').text))
        ymin = int(float(bbox.find('ymin').text))
        xmax = int(float(bbox.find('xmax').text))
        ymax = int(float(bbox.find('ymax').text))

        boxes.append([xmin, ymin, xmax, ymax])
        labels.append(label_map[label])
        difficulties.append(difficult)

    return {'boxes': boxes, 'labels': labels, 'difficulties': difficulties}

def adjust_constrast_brightness(image, constrast, brightness):
    """
        Adjust contrast and brightness.
        Reference: https://blog.csdn.net/tywwwww/article/details/126626804
        constrast : contrast setting on a [0, 200] scale, 100 = unchanged
        brightness: brightness setting on a [0, 200] scale, 100 = unchanged
    """
    #! shift both settings so that 100 maps to 0 (no change)
    c = constrast - 100
    b = brightness - 100

    if c > 0:
        delta = 127. * c / 100
        gain = 255. / (255. - delta * 2)
        bias = gain * (b - delta)
    else:
        delta = -128. * c / 100
        gain = (256. - delta * 2) / 255.
        bias = gain * b + delta

    pixels = image.astype(np.float32)
    avg = pixels.mean()
    #! scale around the image mean so contrast changes pivot on the average level
    adjusted = gain * (pixels - avg) + bias + avg
    return np.clip(adjusted, 0., 255.).astype(np.uint8)

def adjust_brightness(image, factor=1.0, *useless_parameters):
    """
        Adjust image brightness only.
        factor: brightness coefficient in [0, 2]; 1.0 leaves the image unchanged.
        Extra positional arguments are accepted and ignored so this function
        shares a call signature with the other photometric distortions.
    """
    #! map factor in [0, 2] onto the [0, 200] brightness scale (100 = unchanged)
    return adjust_constrast_brightness(image, 100, 100 + (factor - 1.0) * 100)

def adjust_constrast(image, factor=1.0, *useless_parameters):
    """
        Adjust image contrast (NOTE: brightness is shifted by the same factor,
        matching the original behaviour of this helper).
        factor: coefficient in [0, 2]; 1.0 leaves the image unchanged.
    """
    #! both knobs receive the same setting on the [0, 200] scale
    setting = 100 + (factor - 1.0) * 100
    return adjust_constrast_brightness(image, setting, setting)

def adjust_saturation(image, factor=1.0, bgr=False):
    """
        Adjust image saturation.
        factor: saturation multiplier (1.0 = unchanged)
        bgr   : True if the image channels are BGR instead of RGB
    """
    to_hsv = cv2.COLOR_BGR2HSV if bgr else cv2.COLOR_RGB2HSV
    from_hsv = cv2.COLOR_HSV2BGR if bgr else cv2.COLOR_HSV2RGB

    hsv = cv2.cvtColor(image.copy().astype(np.float32), to_hsv)
    hue = hsv[:, :, 0]
    sat = hsv[:, :, 1]
    val = hsv[:, :, 2]

    #! scale the saturation channel and keep it inside the valid range
    sat = np.clip(sat * factor, 0., 255.)

    merged = np.dstack([hue, sat, val])
    return cv2.cvtColor(merged, from_hsv).astype(np.uint8)

def adjust_hue(image, factor=0.0, bgr=False):
    """
        Adjust image hue; for float32 input, OpenCV's HSV hue channel lives
        in [0, 360).
        factor: shift coefficient in [-1, 1]; the hue channel is shifted by
                factor * 255 (degrees).
        bgr   : True if the image channels are BGR instead of RGB
    """
    new_image = image.copy().astype(np.float32)
    new_image = cv2.cvtColor(new_image,cv2.COLOR_BGR2HSV) \
        if bgr else cv2.cvtColor(new_image,cv2.COLOR_RGB2HSV)

    H = new_image[:,:,0]
    S = new_image[:,:,1] #! S
    V = new_image[:,:,2] #! V

    H += factor * 255
    #! hue is a circular quantity: wrap around with modulo instead of clipping,
    #! otherwise pixels shifted past the red boundary (H < 0 or H > 360) are
    #! pinned to the edge and their color is corrupted
    H = np.mod(H, 360.)

    new_image = np.dstack([H,S,V])
    new_image = cv2.cvtColor(new_image,cv2.COLOR_HSV2BGR).astype(np.uint8) \
        if bgr else cv2.cvtColor(new_image,cv2.COLOR_HSV2RGB).astype(np.uint8)

    return new_image

def photometric_distort(image, bgr=False):
    """
        Apply a random subset of photometric distortions (brightness,
        contrast, saturation, hue) in random order, each with probability 0.5.
    """
    new_image = image.copy()
    ops = [adjust_brightness,
           adjust_constrast,
           adjust_saturation,
           adjust_hue]
    random.shuffle(ops)

    for op in ops:
        if random.random() >= 0.5:
            continue
        #! hue takes a small signed shift; the others take a multiplicative factor
        if op.__name__ == 'adjust_hue':
            factor = random.uniform(-18/255., 18/255.)
        else:
            factor = random.uniform(0.5, 1.5)
        new_image = op(new_image, factor, bgr)

    return new_image

def expand(image, boxes, filler, bgr=False):
    """
        Randomly place the (normalized, RGB by default) image onto a larger
        canvas filled with `filler`, simulating smaller objects ("zoom out").
        Returns the padded image and the boxes shifted into canvas coordinates.
    """
    h = image.shape[0]
    w = image.shape[1]
    #! enlarge by a random factor in [1, 4]
    scale = random.uniform(1, 4)
    new_h = int(scale * h)
    new_w = int(scale * w)

    fill = filler[::-1] if bgr else filler
    canvas = np.ones(shape=(new_h, new_w, 3)) * np.array(fill)[None, None]

    #! random top-left corner of the original image inside the canvas
    left = random.randint(0, new_w - w)
    top = random.randint(0, new_h - h)
    canvas[top: top + h, left: left + w, :] = image

    shifted = boxes + np.array([left, top, left, top])[None]
    return canvas, shifted

def random_crop(image, boxes, labels, difficulties):
    """
        SSD-style random crop: repeatedly sample a crop window until one
        satisfies a randomly chosen minimum IoU with the ground-truth boxes.
        Only boxes whose centers fall inside the crop are kept; kept boxes are
        clipped to the crop window and re-expressed in crop coordinates.
        boxes/labels/difficulties are numpy arrays; boxes are absolute coords.
    """
    orig_h = image.shape[0]
    orig_w = image.shape[1]

    while True:
        #! None means "return the input untouched"
        min_overlap = random.choice([0., 0.1, 0.3, 0.5, 0.7, 0.9, None])
        if min_overlap is None:
            return image, boxes, labels, difficulties

        for _ in range(50):  # max trials for this overlap threshold
            scale_h = random.uniform(0.3, 1)
            scale_w = random.uniform(0.3, 1)
            new_h = int(scale_h * orig_h)
            new_w = int(scale_w * orig_w)

            #! reject extreme aspect ratios
            if not 0.5 < new_h / new_w < 2:
                continue

            left = random.randint(0, orig_w - new_w)
            right = left + new_w
            top = random.randint(0, orig_h - new_h)
            bottom = top + new_h
            crop = np.array([left, top, right, bottom], dtype=np.float32)

            #! crop must overlap the ground truth at least min_overlap (IoU)
            overlap = find_jaccard_overlap(crop[None], boxes)[0]
            if overlap.max() < min_overlap:
                continue

            new_image = image[top: bottom, left: right, :]

            #! keep only boxes whose center lies strictly inside the crop
            centers = (boxes[:, :2] + boxes[:, 2:]) / 2.
            keep = (centers[:, 0] > left) * (centers[:, 0] < right) \
                 * (centers[:, 1] > top) * (centers[:, 1] < bottom)
            if not keep.any():
                continue

            new_boxes = boxes[keep]
            new_labels = labels[keep]
            new_difficulties = difficulties[keep]

            #! clip to the crop window, then shift into crop coordinates
            new_boxes[:, :2] = np.maximum(new_boxes[:, :2], crop[:2])
            new_boxes[:, :2] -= crop[:2]
            new_boxes[:, 2:] = np.minimum(new_boxes[:, 2:], crop[2:])
            new_boxes[:, 2:] -= crop[:2]

            return new_image, new_boxes, new_labels, new_difficulties
        
def flip(image, boxes):
    """
        Flip the image horizontally and mirror the boxes.
        boxes: absolute pixel coordinates [xmin, ymin, xmax, ymax].
    """
    flipped = cv2.flip(image, 1)  #! flip around the vertical axis

    mirrored = boxes.copy()
    #! mirror the x-coordinates, then swap columns so that xmin <= xmax again
    mirrored[:, 0] = image.shape[1] - boxes[:, 0]
    mirrored[:, 2] = image.shape[1] - boxes[:, 2]
    mirrored = mirrored[:, [2, 1, 0, 3]]

    return flipped, mirrored

def resize(image, boxes, wh=(300,300), return_percent_coords=True):
    """
        Resize (stretch) the image to wh = (width, height).
        Boxes are returned as fractional coordinates by default, or scaled to
        absolute pixels of the resized image when return_percent_coords=False.
    """
    resized = cv2.resize(image, wh)

    #! normalize by the ORIGINAL image dimensions, ordered (w, h, w, h)
    src_dims = np.array([image.shape[1], image.shape[0],
                         image.shape[1], image.shape[0]])[None]
    scaled_boxes = boxes / src_dims

    if not return_percent_coords:
        dst_dims = np.array([resized.shape[1], resized.shape[0],
                             resized.shape[1], resized.shape[0]])[None]
        scaled_boxes = scaled_boxes * dst_dims

    return resized, scaled_boxes

def normalize(image, mean, std, bgr=False):
    """
        Standardize the image channel-wise: (image - mean) / std.
        mean/std are sequences given in RGB order; they are reversed when the
        image channels are BGR.
    """
    if bgr:
        mean = mean[::-1]
        std = std[::-1]

    ch_mean = np.array(mean)[None, None]
    ch_std = np.array(std)[None, None]
    return (image.copy() - ch_mean) / ch_std

def transform(image, boxes, labels, difficulties, split, wh, bgr=False):
    """
        Full augmentation pipeline.
        split: 'TRAIN' applies photometric distortion, random expand/crop and
               random horizontal flip; 'TEST' only resizes and normalizes.
        wh   : target size (w, h)
        bgr  : True when channels are BGR (mean/std are reversed to match)
        Returns (image, boxes, labels, difficulties); boxes come back as
        fractional coordinates.
    """
    assert split in {'TRAIN','TEST'}

    #! ImageNet statistics from the torchvision-based original (RGB order);
    #! the cv2-based rewrite may deliver BGR, hence the optional reversal
    mean = [0.485, 0.456, 0.406]
    std  = [0.229, 0.224, 0.225]
    if bgr:
        mean = mean[::-1]
        std = std[::-1]

    new_image  = image.copy()
    new_boxes  = boxes.copy()
    new_labels = labels.copy()
    new_difficulties = difficulties.copy()

    if split == 'TRAIN':
        #! photometric ops operate on the original uint8 image
        new_image = photometric_distort(new_image, bgr=bgr)

        #! expand/crop operate on [0, 1] floats
        new_image = new_image / 255.

        if random.random() < 0.5:
            new_image, new_boxes = expand(new_image, new_boxes, filler=mean, bgr=bgr)

        new_image, new_boxes, new_labels, new_difficulties = \
            random_crop(new_image, new_boxes, new_labels, new_difficulties)

        #! back to uint8 before flipping
        new_image = (new_image * 255.).astype(np.uint8)

        if random.random() < 0.5:
            new_image, new_boxes = flip(new_image, new_boxes)

    #! resize returns boxes as fractional coordinates
    new_image, new_boxes = resize(new_image, new_boxes, wh=wh)

    #! scale to [0, 1] then standardize
    new_image = new_image / 255.
    new_image = normalize(new_image, mean, std, bgr=bgr)

    return new_image, new_boxes, new_labels, new_difficulties
