'''
    这个文件的创建是为了验证我提出的frequency_mixup数据增强方法
在COSSY_train_new_back(对所有鱼眼数据集进行划分后的训练集)进行实验,

其中Market1和Market2是未经过任何增强处理的数据集

然后我对这些数据使用角度直方图增强将其的数量变到和框的数量一样，也就是一个框对应着一个图，

然后我在这个基础上对半径划分20个区间，将框(对应的图片编号)放置到20个区间之中，
然后，统计概率，取倒数，做sigmoid，做softmax

然后再做mixup。保证我们最后mixup得到的图像数量和
我们在COSSY_train_shao/Market1中的图像数量相同。
然后观察两者分布之间的差异。

经过我们的实验，推翻了上述的统计概率，取倒数，做sigmoid，做softmax方法，而是采取另外两种方法进行采样
分布的获取，

我们利用获取之后的采样分布，采样mix up和cut up的第一个分布，然后随机采样另一个分布，进行mixup或者cut up

不过我在实现了Frequency mix up之后，观察生成结果，才发现我忽略了一个严重的问题，忘记了mixup的前提
是需要图像背景固定不变。因此得到的图像背景存在着非常大的差异。导致重影的产生，这个问题在cut up中应该不会再出现

'''


from tools.get_data_info import get_json_data
import os
from tqdm import tqdm
import copy
import math
import torchvision.transforms.functional as tvf
from PIL import Image
import torch
import numpy as np
import shutil
import random
import matplotlib.pyplot as plt
from collections import defaultdict
import xml.etree.ElementTree as ET
import json
# Build a list of img_box dicts; each dict holds: file_name, bbox, and the rotation angle to be computed.
def boxTo_image(json_path):
    '''
    Expand the annotation file so that every box gets its own record.

    :param json_path: path of the COSSY-style annotation json
    :return: imgs_boxes: list with one dict per box (filename, all boxes of
                 the image, the target box, its polar angle, the histogram
                 equalised target angle 'ro_angle', L2 distance from the
                 image centre, per-box PCA jitter strength, ...)
             one_file_boxes_angle: sorted list of every box-centre angle
                 (angle between the image-centre -> box-centre ray and the
                 positive x axis, mapped to [0, 360) degrees)
    '''
    imgs_boxes = list()
    one_file_boxes_angle = list()          # every box angle, for the histogram equalisation
    one_file_info,_ = get_json_data(json_path,write_to_txt=False)

    for img in one_file_info:
        box_index = 0
        for box in img['boxes']:

            img_box = dict()
            # angle between the box centre and the image centre
            img_cx = img['width']//2        # image centre coordinates
            img_cy = img['height']//2
            angle = math.atan2((box[1]-img_cy),(box[0]-img_cx))   # atan2 range is [-pi, pi]; shift to [0, 2pi)
            angle = angle/math.pi*180 if (angle>=0) else (2*math.pi+angle)/math.pi*180    # convert to degrees in [0, 360)
            #
            one_file_boxes_angle.append(copy.deepcopy(angle))
            img_box['width'] = img['width']
            img_box['height'] = img['height']
            img_box['filename'] =img['filename']
            img_box['bbox'] = img['boxes']                    # keep every box of the image
            img_box['angle'] = angle
            img_box['ro_angle'] = None
            img_box['box_index'] = box_index
            img_box['max_box_index'] = len(img['boxes'])
            # per-box Fancy-PCA jitter strength grows linearly with the box index
            img_box['gaussian_std'] = float((img_box['box_index']+1)/img_box['max_box_index'])*1
            img_box['corresponding_box'] = box
            # distance of the box centre from the image centre
            img_box['L2dist'] =np.sqrt(np.square(box[0]-img['width']/2) +
                                       np.square(box[1]-img['height']/2))
            box_index = box_index + 1
            imgs_boxes.append(copy.deepcopy(img_box))
    one_file_boxes_angle.sort()     # sort so that rank(angle)/N approximates the CDF
    total_boxes = len(one_file_boxes_angle)
    # histogram equalisation: map each angle to its rank; the upper bound
    # (exactly 360) is never reached
    for img_box in imgs_boxes:
        index = one_file_boxes_angle.index(img_box['angle'])
        img_box['ro_angle'] = float(index/total_boxes)*2*180  # map the rank back to [0, 360)
    return imgs_boxes,one_file_boxes_angle

##########
# Image rotation helpers

def rotate(image, degrees, labels, expand=False):
    '''
    Rotate a PIL image clockwise by `degrees` and transform its rotated-box
    labels to match.

    :param image: PIL.Image
    :param degrees: rotation angle in degrees
    :param labels: tensor, shape (N, 5): absolute cx, cy, w, h, angle in
                   degrees. Modified in place and also returned.
    :param expand: forwarded to torchvision's rotate; when True the output
                   canvas grows to fit the rotated image.
    :return: (rotated image, transformed labels)
    '''
    img_w, img_h = image.width, image.height
    image = tvf.rotate(image, angle=-degrees, expand=expand)
    new_w, new_h = image.width, image.height
    # image coordinate -> cartesian coordinate (y axis flipped)
    x = labels[:, 0] - 0.5 * img_w
    y = -(labels[:, 1] - 0.5 * img_h)
    # cartesian -> polar. atan2 handles x == 0 and the origin directly, so
    # the previous manual quadrant fix-up and NaN patching are unnecessary
    # (the branch values differed only by 2*pi, which cos/sin ignore).
    r = (x.pow(2) + y.pow(2)).sqrt()
    theta = torch.atan2(y, x)
    # rotate the centre points by the same angle as the image
    theta -= (degrees * np.pi / 180)
    # polar -> cartesian, then back to image coordinates of the new canvas
    labels[:, 0] = r * torch.cos(theta) + 0.5 * new_w
    labels[:, 1] = -(r * torch.sin(theta)) + 0.5 * new_h
    # the box angle follows the image rotation; normalise to [-90, 90)
    labels[:, 4] += degrees
    labels[:, 4] = torch.remainder(labels[:, 4], 180)
    labels[:, 4][labels[:, 4] >= 90] -= 180

    return image, labels
def rotate_one_box(box, degrees, img_w=1080, img_h=1080):
    '''
    Rotate a single box around the image centre by `degrees` (clockwise).

    :param box: list of 5 floats: cx, cy, w, h, angle in degrees.
                Mutated in place and also returned.
    :param degrees: rotation angle in degrees
    :param img_w: image width (fisheye images keep their size after rotation)
    :param img_h: image height
    :return: the same list with updated centre coordinates and angle
    '''
    # Fisheye images: the canvas size is unchanged by the rotation.
    new_w, new_h = img_w, img_h
    # image coordinate -> cartesian coordinate (y axis flipped)
    x = box[0] - 0.5 * img_w
    y = -(box[1] - 0.5 * img_h)
    # cartesian -> polar. atan2 replaces the old atan(y / x), which raised
    # ZeroDivisionError for boxes centred on the vertical axis (x == 0);
    # atan2(0, 0) is 0, matching the old NaN -> 0 patch for the centre point.
    r = math.hypot(x, y)
    theta = math.atan2(y, x)
    # rotate the centre point
    theta -= (degrees * np.pi / 180)
    # polar -> cartesian, then back to image coordinates
    box[0] = r * math.cos(theta) + 0.5 * new_w
    box[1] = -(r * math.sin(theta)) + 0.5 * new_h
    # the box angle follows the rotation; normalise to [-90, 90)
    box[4] += degrees
    box[4] = math.remainder(box[4], 180)
    box[4] = box[4] - 180 if box[4] >= 90 else box[4]

    return box

def fancy_pca(img, alpha_std=0.1):
    '''
    Apply AlexNet-style "Fancy PCA" colour augmentation.

    INPUTS:
    img:  numpy array with (h, w, rgb) shape, ints between 0-255
    alpha_std:  std-dev of the Gaussian that scales the eigenvalues
                (the paper used 0.1). With 0.0 the image is returned
                unchanged apart from the uint8 round trip.
    RETURNS:
    numpy uint8 array, same shape as the input, values in 0-255
    (the old docstring incorrectly claimed a float 0-1 return).

    From "ImageNet Classification with Deep Convolutional Neural Networks"
    (Krizhevsky et al., 2012): perform PCA on the set of RGB pixel values,
    then add to every pixel the principal components scaled by
    eigenvalue * alpha, where alpha ~ N(0, alpha_std) is drawn once per
    image ("object identity is invariant to changes in the intensity and
    color of the illumination").
    # http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf
    '''
    orig_img = img.astype(float).copy()

    # work in the 0-1 range for the covariance computation
    scaled = img / 255.0
    # flatten the image to rows of RGB triples
    img_rs = scaled.reshape(-1, 3)
    # centre each channel
    img_centered = img_rs - np.mean(img_rs, axis=0)
    # 3x3 covariance matrix of the colour channels
    img_cov = np.cov(img_centered, rowvar=False)
    # eigh returns eigenvalues in ascending order
    eig_vals, eig_vecs = np.linalg.eigh(img_cov)
    # reorder eigenvalues (and their eigenvectors) in descending order;
    # this replaces the old reversed-view sort trick with an explicit perm
    sort_perm = eig_vals.argsort()[::-1]
    eig_vals = eig_vals[sort_perm]
    eig_vecs = eig_vecs[:, sort_perm]

    # alpha is drawn once per augmentation call (not once per channel)
    alpha = np.random.normal(0, alpha_std)

    # per-channel offset: [p1, p2, p3] @ [a*l1, a*l2, a*l3]^T
    # plain ndarray math -- np.matrix is deprecated
    add_vect = eig_vecs @ (alpha * eig_vals)

    for idx in range(3):   # RGB
        orig_img[..., idx] += add_vect[idx]

    # stay in the incoming uint8 value range
    orig_img = np.clip(orig_img, 0.0, 255.0)
    orig_img = orig_img.astype(np.uint8)

    return orig_img


def save_img_box_xml(ro_img_box, xml_dir):
    '''Serialise one image record (filename, size and all of its rotated
    boxes) into a rolabelImg-style XML annotation file named
    <filename>.xml inside xml_dir.

    :param ro_img_box: dict with keys 'filename', 'width', 'height' and
                       'bbox' (list of [cx, cy, w, h, angle-in-degrees])
    :param xml_dir: directory the xml file is written to
    :return: None
    '''
    import xml.etree.ElementTree as ET

    def _child(parent, tag, text=None):
        # Append a sub-element, optionally filling its text content.
        node = ET.SubElement(parent, tag)
        if text is not None:
            node.text = text
        return node

    annotation = ET.Element('annotation')
    _child(annotation, 'folder', 'video_mat')
    _child(annotation, 'filename', ro_img_box['filename'])
    _child(annotation, 'path', ro_img_box['filename'] + '.jpg')

    source = _child(annotation, 'source')
    _child(source, 'database', 'Unknown')

    size = _child(annotation, 'size')
    _child(size, 'width', str(ro_img_box['width']))
    _child(size, 'height', str(ro_img_box['height']))
    _child(size, 'depth', '3')

    _child(annotation, 'segmented', '0')

    for box in ro_img_box['bbox']:
        obj = _child(annotation, 'object')
        _child(obj, 'type', 'robndbox')
        _child(obj, 'name', '1')
        _child(obj, 'pose', 'Unspecified')
        _child(obj, 'truncated', '0')
        _child(obj, 'difficult', '0')
        robndbox = _child(obj, 'robndbox')
        _child(robndbox, 'cx', str(box[0]))
        _child(robndbox, 'cy', str(box[1]))
        _child(robndbox, 'w', str(box[2]))
        _child(robndbox, 'h', str(box[3]))
        # rolabelImg stores the angle in radians
        _child(robndbox, 'angle', str(box[4] / 180 * math.pi))

    tree = ET.ElementTree(annotation)
    tree.write(os.path.join(xml_dir, ro_img_box['filename'] + '.xml'), encoding='utf-8')

# Default source/destination directories for a Market2 enhancement run
# (these module-level values are shadowed inside the __main__ blocks below).
root_path = '../COSSY_train_shao2/Market2/'
dest_path = '../COSSY_train_shao2/Market2_enhance/'
def rotate_img_box(root_path,dest_path,imgs_boxes,method='histogram'):
    '''
    Rotate every (image, box) pair, apply Fancy-PCA colour jitter, then save
    the rotated image plus one xml annotation per box into dest_path.

    :param root_path: directory holding the source .jpg images
    :param dest_path: output directory (created when missing)
    :param imgs_boxes: list from boxTo_image, one record per box
    :param method: 'histogram' rotates by the angle-histogram offset
                   (ro_angle - angle); 'max_diff_degree' rotates box k of an
                   image by 360 * k / num_boxes and saves the result
    :return: ro_imgs_boxs: list of dicts describing every rotated box/image
    '''
    ro_imgs_boxs = list()
    if not os.path.exists(dest_path):
        os.mkdir(dest_path)
    for img_box in tqdm(imgs_boxes):
        ro_img_box = dict()
        img = Image.open(os.path.join(root_path,img_box['filename']+'.jpg'))
    #    print('original angle: {}'.format(img_box['angle']))
    #    print('equalised angle: {}'.format(img_box['ro_angle']))
    #    print('rotation applied: {}'.format(img_box['ro_angle']-img_box['angle']))
        if method == 'histogram':
            ro_img,ro_label = rotate(img,(img_box['ro_angle']-img_box['angle']),labels=torch.tensor(img_box['bbox']))
            # the single box matching this rotated image (the final goal:
            # one box per image). NOTE: rotate_one_box mutates the list in place.
            ro_img_box['corresponding_box'] = rotate_one_box(img_box['corresponding_box'],
                                                    (img_box['ro_angle']-img_box['angle']))
        elif method == 'max_diff_degree':
            ro_angle = 360 * (img_box['box_index'] / img_box['max_box_index'])
            ro_img, ro_label = rotate(img, ro_angle, labels=torch.tensor(img_box['bbox']))
            # the single box matching this rotated image
            # NOTE(review): any other `method` value leaves ro_img/ro_label
            # undefined and raises NameError below — confirm intended.
            ro_img_box['corresponding_box'] = rotate_one_box(img_box['corresponding_box'],
                                                             ro_angle)
        ro_filename = img_box['filename'].split('_')[-1] + '_' + str(img_box['box_index'])
        ro_img_box['width'] = img_box['width']
        ro_img_box['height'] = img_box['height']
        ro_img_box['filename'] = ro_filename
        ro_img_box['bbox'] = np.array(ro_label.data).tolist()    # list() alone would only convert the outer layer

        ro_img_box['L2dist'] = img_box['L2dist']
        # PCA colour augmentation on the rotated image
        # numpy array <-> PIL image conversion: https://blog.csdn.net/qq_30159015/article/details/80070514
        ro_img = fancy_pca(np.array(ro_img), img_box['gaussian_std'])
        ro_img = Image.fromarray(ro_img.astype('uint8')).convert('RGB')
    #    print('gaussian std: {}'.format(img_box['gaussian_std']))
        ro_imgs_boxs.append(ro_img_box)
        ro_img.save(os.path.join(dest_path,
                                 ro_filename + '.jpg'))
    #    print(ro_img_box)
        # write the per-image xml annotation
        save_img_box_xml(ro_img_box,dest_path)
    return ro_imgs_boxs
def plot_radius_distribution(L2_dist,seperate_group=20,img_width=1080,img_height=1080,
                             x_label='box center L2_dist from img center',
                             y_title='box numbers in corresponding range',
                             title='L2dist Radius distribution'):
    '''
    Plot (and return) the histogram of box-centre distances from the image
    centre. img_width and img_height are expected to be equal and shared by
    Market1 and Market2.

    :param L2_dist: iterable of the L2 distances to bin
    :param seperate_group: number of radius bins, default 20
    :param img_width: width of the images the boxes come from
    :param img_height: height of the images the boxes come from
    :return: (X, Y): bin left edges and the per-bin counts (numpy arrays)
    '''
    img_radius = max(img_width,img_height)//2
    single_distance = img_radius//seperate_group
    L2_dist = np.array(L2_dist)      # the L2 distances
    X = np.linspace(0, img_radius - single_distance, seperate_group)
    bin_edges = np.append(X, img_radius)
    # np.histogram uses half-open bins [edge_i, edge_{i+1}) with the last
    # bin closed. The previous manual loop tested >= and <= on both edges,
    # so a distance exactly on an interior edge was counted in two adjacent
    # bins; each value is now counted exactly once.
    Y, _ = np.histogram(L2_dist, bins=bin_edges)
    fig = plt.figure()
    plt.bar(X, Y, width=single_distance//2, color="green")
    plt.xlabel(x_label)
    plt.ylabel(y_title)
    plt.title(title)

    plt.show()
    return X,np.array(Y)

def get_image_filename(ro_imgs_boxs,image_width = 1080,
                       image_height = 1080, seperate_group=20):
    '''
    Build the two lookup structures used by the frequency mix-up sampler.

    :param ro_imgs_boxs: output of rotate_img_box, one dict per rotated box
    :param image_width: width shared by every image
    :param image_height: height shared by every image
    :param seperate_group: number of radius bins, default 20
    :return: (all_img_list, radius_imgs_dict): the list of every rotated
             filename, and a dict mapping radius-bin index (starting at 0)
             to {filename: corresponding box}
    '''
    radius = max(image_width, image_height) // 2
    bin_width = radius // seperate_group
    all_img_list = []
    radius_imgs_dict = defaultdict(dict)
    for record in ro_imgs_boxs:
        name = record['filename']
        all_img_list.append(name)
        # NOTE(review): boxes whose L2dist exceeds `radius` (possible near
        # the image corners) land in bins >= seperate_group — confirm this
        # is intended, since the sampling distribution only covers the
        # first seperate_group bins.
        bin_index = math.floor(record['L2dist'] / bin_width)
        radius_imgs_dict[bin_index][name] = record['corresponding_box']
    return all_img_list, radius_imgs_dict

def get_group(rand0_1, distribution):
    '''
    Map a uniform random number to a bin index via the discrete CDF.

    :param rand0_1: random number in [0, 1]
    :param distribution: sampling distribution (probabilities summing to ~1)
    :return: sampled group index in 0 .. len(distribution)-1
    '''
    # Walk the cumulative distribution. This fixes two defects of the old
    # subtract-until-nonpositive loop: rand0_1 == 0.0 returned -1, and a
    # floating-point shortfall (sum(distribution) slightly below rand0_1)
    # raised IndexError; we now clamp to the last group instead.
    cumulative = 0.0
    for group, p in enumerate(distribution):
        cumulative += p
        if rand0_1 <= cumulative:
            return group
    return len(distribution) - 1
def get_info_from_xml(xml_path):
    '''
    Read filename, image size and the boxes from a rolabelImg xml file.

    :param xml_path: path of the xml annotation
    :return: filename, width, height and the boxes as a list of
             [cx, cy, w, h, angle-in-degrees] lists (angle in [-90, 90))
    '''
    boxes = list()
    root = ET.parse(xml_path).getroot()
    # walk the top-level annotation children
    for node in root:
        if node.tag == 'filename':
            filename = node.text
        elif node.tag == 'size':
            for dim in node:
                if dim.tag == 'width':
                    width = int(dim.text)
                elif dim.tag == 'height':
                    height = int(dim.text)
        elif node.tag == 'object':  # one detected person
            person_box = list()
            for field in node:  # type, name, pose, truncated, difficult, robndbox
                if field.tag != 'robndbox':
                    continue
                for value in field:
                    person_box.append(float(value.text))
                # Convert the stored radian angle to degrees, matching the
                # RAPiD convention:
                # - if w > h the annotation axes are swapped: exchange w/h
                #   and subtract 90 degrees
                # - if h >= w keep angles below 90 degrees and subtract 180
                #   from the rest
                if person_box[2] > person_box[3]:
                    person_box[2], person_box[3] = person_box[3], person_box[2]
                    person_box[4] = person_box[4] * 180 / math.pi - 90
                else:
                    degrees = person_box[4] * 180 / math.pi
                    person_box[4] = degrees if (degrees < 90) else (degrees - 180)
                # guard against floating-point drift outside [-90, 90)
                if not (-90 <= person_box[4] < 90):
                    person_box[4] = -90
            boxes.append(copy.deepcopy(person_box))
    return filename, width, height, boxes

###########################################################################################
# Geometry helpers used by the cut-up augmentation.

def GetCross(x1, y1, x2, y2, x, y):
    # Cross product of the vectors (x1,y1)->(x2,y2) and (x1,y1)->(x,y);
    # its sign tells on which side of the directed edge the point (x, y) lies.
    ax, ay = x2 - x1, y2 - y1
    bx, by = x - x1, y - y1
    return ax * by - ay * bx

def isInSide(x1, y1, x2, y2, x3, y3, x4, y4, x, y):
    # Point-in-rectangle test: (x, y) is inside the quadrilateral
    # (x1,y1)..(x4,y4) iff it lies on the same side of each pair of
    # opposite edges.
    side_a = GetCross(x1, y1, x2, y2, x, y) * GetCross(x3, y3, x4, y4, x, y)
    side_b = GetCross(x2, y2, x3, y3, x, y) * GetCross(x4, y4, x1, y1, x, y)
    return side_a >= 0 and side_b >= 0

def xxyy(img_x, img_y, imgw, imgh, imgt):
    # Corner coordinates of a rotated box: rotate the four half-extent
    # offsets by imgt degrees and translate to the box centre.
    c = math.cos(math.radians(imgt))
    s = math.sin(math.radians(imgt))
    half_w, half_h = imgw / 2, imgh / 2
    corners = []
    for dx, dy in ((-half_w, -half_h), (half_w, -half_h),
                   (half_w, half_h), (-half_w, half_h)):
        corners.append(dx * c - dy * s + img_x)
        corners.append(dy * c + dx * s + img_y)
    return tuple(corners)

###########################################################################################

def frequency_mixup(all_img_list,radius_imgs_dict,distribution,
                    generate_mixup_nums,image_source_dir,
                    xml_source_dir,image_des_dir,
                    xml_des_dir,
                    xml_prefix='mix_',method='cutup'):
    '''
    Generate new images plus xml files with the frequency mix-up strategy.
    Output files are named <xml_prefix>1 .. <xml_prefix>generate_mixup_nums.

    :param all_img_list: list of every rotation-augmented image filename
    :param radius_imgs_dict: the same images grouped by radius bin, e.g.
                             {0: {'2345': box, '2541': box}, ...}
    :param distribution: sampling distribution for picking the second image's
                         radius bin
    :param generate_mixup_nums: number of mixed images to generate
    :param image_source_dir: directory of the input images
    :param xml_source_dir: directory of the input xml files
    :param image_des_dir: output directory for the generated images
    :param xml_des_dir: output directory for the generated xml files
    :param xml_prefix: filename prefix shared by generated images and xmls
    :param method: 'cutup' or 'mixup'. mixup turned out unusable (the two
                   backgrounds differ, producing ghosting); cutup v1 simply
                   copies the sampled person's box pixels, a later version
                   could additionally avoid overlapping person boxes.
    :return:
    '''
    assert method in ['cutup','mixup']
    # (re)create the output directories; wipe them first when they exist
    for dir in [image_des_dir,xml_des_dir]:
        if not os.path.exists(dir):
            os.mkdir(dir)
        else:
            shutil.rmtree(dir)  # clear the folder, then recreate it
            os.mkdir(dir)
    for i in tqdm(range(1,(generate_mixup_nums+1))):   # each iteration produces one mixed image + xml
        # pick the two images to mix: the first uniformly at random
        random_filename = all_img_list[random.randint(0,len(all_img_list)-1)]
        # for the second image, first draw a radius bin from `distribution`,
        # then pick a random image inside that bin
        probability = random.uniform(0,1)
        group = get_group(probability,distribution)
        sample_filename = list(radius_imgs_dict[group].keys())[random.randint(0, len(radius_imgs_dict[group]) - 1)]


        xml_mixup_dict = dict()
        xml_mixup_dict['filename'] = xml_prefix+'{}'.format(i)
        # read both images' xml info, merge the boxes, write the new xml
        _,xml_random_width,xml_random_height,xml_random_boxes = get_info_from_xml(os.path.join(xml_source_dir,random_filename+'.xml'))
        _,xml_sample_width,xml_sample_height,xml_sample_boxes = get_info_from_xml(os.path.join(xml_source_dir,sample_filename+'.xml'))
        assert xml_random_width==xml_sample_width
        assert xml_random_height==xml_sample_height
        xml_mixup_dict['width'] = xml_random_width
        xml_mixup_dict['height'] = xml_random_height
        xml_mixup_dict['bbox'] = list()
        xml_mixup_dict['bbox'].append(radius_imgs_dict[group][sample_filename])
        [xml_mixup_dict['bbox'].append(box) for box in xml_random_boxes]
        # save the merged annotation
        save_img_box_xml(xml_mixup_dict,xml_des_dir)

        # load both images and blend them
        image_random = np.array(Image.open(os.path.join(image_source_dir, random_filename + '.jpg')))
        image_sample = np.array(Image.open(os.path.join(image_source_dir, sample_filename + '.jpg')))
        if method=='mixup':
            lam = np.random.beta(20, 20)
            lam = 0.5  # deliberately overrides the beta draw: equal weights keep both persons visible
            image_mixup = lam * image_random + (1 - lam) * image_sample
            image_mixup = Image.fromarray(image_mixup.astype('uint8')).convert('RGB')  # back to a PIL image
            image_mixup.save(os.path.join(image_des_dir, xml_prefix+'{}.jpg'.format(i)))  # save the image
        elif method=='cutup':     # cut the sampled person out of image_sample and paste it into image_random
            image_mixup = image_random.copy()
            for box in [radius_imgs_dict[group][sample_filename]]:
                x1, y1, x2, y2, x3, y3, x4, y4 = xxyy(box[0], box[1], box[2], box[3], box[4])  # the box's 4 corner points
                for p in range(int(min(x1, x2, x3, x4)) - 1, int(max(x1, x2, x3, x4)) + 1):
                    for q in range(int(min(y1, y2, y3, y4)) - 1, int(max(y1, y2, y3, y4)) + 1):
                        if isInSide(x1, y1, x2, y2, x3, y3, x4, y4, p, q):
                            image_mixup[q, p, :] = image_sample[q, p, :]
            image_mixup = Image.fromarray(image_mixup.astype('uint8')).convert('RGB')  # back to a PIL image
            image_mixup.save(os.path.join(image_des_dir, xml_prefix + '{}.jpg'.format(i)))  # save the image
def sigmoid_fun(x):
    """Logistic sigmoid: 1 / (1 + e^-x), elementwise for arrays."""
    denom = 1 + np.exp(-x)
    return 1 / denom

def softmax_fun(x):
    """Softmax of x; a tiny epsilon in the denominator guards against
    division by zero."""
    exps = np.exp(x)
    return exps / (exps.sum() + 1e-16)
def get_sample_distribution(old_distirbution,method='reciprocal_ratio',show_plot=True):
    '''
    Derive a sampling distribution from a non-uniform discrete distribution
    so that sampling with it flattens the observed distribution.

    :param old_distirbution: the non-uniform discrete distribution, numpy array
    :param method: one of two strategies:
        'reciprocal_ratio': take the reciprocal of each non-zero probability,
            then rescale so the new distribution sums to 1
        'exchange_probability': keep zero-probability bins at zero and swap
            the remaining probabilities pairwise: largest with smallest,
            second largest with second smallest, and so on
    :param show_plot: plot old_distirbution together with the result
    :return: sample_distirbution: the sampling distribution (numpy array)
    '''
    assert method in ['exchange_probability','reciprocal_ratio']
    if(method=='exchange_probability'):
        # BUG FIX: this branch previously read the module-level global
        # ``L2_distribution`` (twice) instead of the parameter, so the
        # function silently used unrelated data when called with anything else.
        distribution_zero_num = (old_distirbution == 0).sum()  # zero bins stay zero after the exchange
        sort_result = sorted(enumerate(old_distirbution), key=lambda x: x[1])
        distribution_variable = np.array([x[1] for x in sort_result])
        index = {x[0]: i for i, x in enumerate(sort_result)}
        index = sorted(index.items(), key=lambda x: x[0])  # sort the dict by original position
        rank = [x[1] for x in index]  # rank of every bin
        # shift the ranks down by distribution_zero_num: the zero bins wrap
        # around to the last distribution_zero_num rank positions
        rank = np.array([k - distribution_zero_num if (k - distribution_zero_num) >= 0 else (
                    len(old_distirbution) + (k - distribution_zero_num)) for k in rank])
        sample_distirbution = distribution_variable[len(distribution_variable) - 1 - rank]  # swap by rank
    elif(method=='reciprocal_ratio'):
        distribution_variable = np.array([0 if k == 0 else 1 / k for k in old_distirbution])
        ratio = 1 / distribution_variable.sum()
        sample_distirbution = ratio * distribution_variable
    if show_plot:
        fig = plt.figure()
        X = np.arange(1,len(old_distirbution)+1,1)
        plt.plot(X, old_distirbution)
        plt.plot(X, sample_distirbution)
        plt.legend(['old distribution','sample distirbution'])
        plt.title('distribution change')
        plt.show()
    return sample_distirbution

if 0:
#if __name__ == '__main__':
    # Disabled experiment: build a sampling distribution from the discrete
    # derivative of the radius histogram (kept for reference).
    json_path = '../COSSY_train_new_back/annotations/Market1.json'
    imgs_boxes, _ = boxTo_image(json_path)
    # collect the original box radius distribution
    origin_L2dist = list()
    for img_box in imgs_boxes:
        origin_L2dist.append(img_box['L2dist'])
    X,L2_distribution = plot_radius_distribution(origin_L2dist)

    # normalise the histogram to a probability distribution
    fig = plt.figure()
    L2_distribution = L2_distribution/np.sum(L2_distribution)
    plt.plot(X, L2_distribution)
    plt.axis([0, 540, 0, 1])

    L2_new_distribution_Derivative = L2_distribution[:-1] - L2_distribution[1:]
    num = list()
    while len(L2_new_distribution_Derivative) != 0:
        num.append(L2_new_distribution_Derivative.sum())
        L2_new_distribution_Derivative = L2_new_distribution_Derivative[:-1]
    num = np.array(num)
    #print(num.sum())
    # shift by the minimum of num so every probability becomes positive
    min_num = np.min(num)
    ratio = 1/(-(len(num)+1)*min_num+num.sum())
    assert (ratio > 0) & (ratio<1)
    p_base = -min_num*ratio
    assert (p_base > 0) & (p_base < 1)
    sample_distribution = [p_base]
    sample_distribution.extend(list((ratio*num[::-1]+p_base)))
    print(sample_distribution)
    # sample_distribution is the candidate sampling distribution
    #
    fig = plt.figure()
    plt.plot(X, (1 - L2_distribution))
    plt.axis([0, 540, 0, 1])
    plt.plot(X, sample_distribution)
    #plt.plot(X,softmax_fun(sample_distribution))                     # after softmax
    plt.show()
    print(ratio)
    #print(L2_distribution)
    fig = plt.figure()
    plt.plot(X, (1 - L2_distribution))
    plt.axis([0, 540, 0, 1])
    fig = plt.figure()
    sample_distribution = (1-L2_distribution)/np.sum(1-L2_distribution)
    plt.plot(X, sample_distribution)
    #print(sample_distribution)
    plt.axis([0, 540, 0, 1])
    fig = plt.figure()
    sample_distribution = (1/L2_distribution)
    #sample_distribution = sigmoid_fun(1/L2_distribution)
    # NOTE(review): X[:-1] has seperate_group-1 points but sample_distribution
    # has seperate_group — this plot would raise if the block were enabled.
    plt.plot(X[:-1],sample_distribution)
    plt.show()
    '''
    root_path = '../COSSY_train_new_back/Market1/'
    dest_path = '../COSSY_train_new_back/Market1_enhance/'
    ro_imgs_boxs = rotate_img_box(root_path, dest_path, imgs_boxes)   # 获取旋转后的图像框
    '''

if 0:
#if __name__ == '__main__':         # idea 7: exchange-probability distribution
    # Disabled experiment: swap large and small bin probabilities (the
    # logic later folded into get_sample_distribution('exchange_probability')).
    json_path = '../COSSY_train_new_back/annotations/Market1.json'
    imgs_boxes, _ = boxTo_image(json_path)
    # collect the original box radius distribution
    origin_L2dist = list()
    for img_box in imgs_boxes:
        origin_L2dist.append(img_box['L2dist'])
    X,L2_distribution = plot_radius_distribution(origin_L2dist)
    distribution_zero_num = (L2_distribution == 0).sum()            # zero-probability bins stay zero after the exchange
    # normalise the histogram to a probability distribution
    fig = plt.figure()
    L2_distribution = L2_distribution/np.sum(L2_distribution)
    plt.plot(X, L2_distribution)
    plt.axis([0, 540, 0, 1])

    sort_result = sorted(enumerate(L2_distribution), key=lambda x: x[1])
    L2_distribution = np.array([x[1] for x in sort_result])
    index = {x[0]:i for i,x in enumerate(sort_result)}
    index = sorted(index.items(), key=lambda x: x[0])     # sort the dict by original position
    rank = [x[1] for x in index]                 # rank of every bin
    # shift the ranks down by distribution_zero_num: the zero bins wrap to the last positions
    rank = np.array([k-distribution_zero_num if(k-distribution_zero_num)>=0 else (
            len(L2_distribution)+(k-distribution_zero_num))  for k in rank])
    L2_distribution_new = L2_distribution[len(L2_distribution)-1-rank]        # swap the probabilities by rank
    plt.plot(X, L2_distribution_new)
    plt.axis([0, 540, 0, 1])

    plt.show()
if 0:
#if __name__ == '__main__':         # idea 8: reciprocal-ratio distribution
    # Disabled experiment: reciprocal of each bin probability, rescaled to
    # sum to 1 (later folded into get_sample_distribution('reciprocal_ratio')).
    Debug = True
    json_path = '../COSSY_train_new_back/annotations/Market1.json'
    imgs_boxes, _ = boxTo_image(json_path)
    # collect the original box radius distribution
    origin_L2dist = list()
    for img_box in imgs_boxes:
        origin_L2dist.append(img_box['L2dist'])
    X,L2_distribution = plot_radius_distribution(origin_L2dist)

    # normalise the histogram to a probability distribution
    fig = plt.figure()
    L2_distribution = L2_distribution/np.sum(L2_distribution)
    plt.plot(X, L2_distribution)
    plt.axis([0, 540, 0, 1])

    # reciprocal of the non-zero bins, rescaled to a probability distribution
    L2_distribution_new = np.array([0 if k==0 else 1/k for k in L2_distribution])
    ratio = 1/L2_distribution_new.sum()
    L2_distribution_new = ratio*L2_distribution_new
    plt.plot(X, L2_distribution_new)
    plt.axis([0, 540, 0, 1])

    plt.show()
#if 0:
if __name__ == '__main__':         # idea 8: full frequency mix-up pipeline
    # Debug=True skips the (slow) rotation/augmentation step and reuses the
    # cached ro_imgs_boxs.json written by a previous run.
    Debug = True
    json_path = '../COSSY_train_new_back/annotations/Market1.json'
    imgs_boxes, _ = boxTo_image(json_path)
    # collect the original box radius distribution
    origin_L2dist = list()
    for img_box in imgs_boxes:
        origin_L2dist.append(img_box['L2dist'])
    X,L2_distribution = plot_radius_distribution(origin_L2dist)

    L2_distribution = L2_distribution/np.sum(L2_distribution)

    L2_distribution_sample = get_sample_distribution(L2_distribution,method='exchange_probability')
    # build the image list plus the radius-bin -> {filename: box} dict
    root_path = '../COSSY_train_new_back/Market1/'
    dest_path = '../COSSY_train_new_back/Market1_enhance/'
    if not Debug:
        ro_imgs_boxs = rotate_img_box(root_path, dest_path, imgs_boxes)  # rotated image/box records
        with open('../COSSY_train_new_back/Market1_enhance/ro_imgs_boxs.json', 'w') as f:
            f.write(json.dumps(ro_imgs_boxs,indent=1,separators=(',',':')))

    with open('../COSSY_train_new_back/Market1_enhance/ro_imgs_boxs.json', 'r') as f:
        ro_imgs_boxs = json.load(f)

    all_img_list,radius_imgs_dict = get_image_filename(ro_imgs_boxs)

    image_source_dir = '../COSSY_train_new_back/Market1_enhance/'
    xml_source_dir = '../COSSY_train_new_back/Market1_enhance/'
    image_des_dir = '../COSSY_train_new_back/Market1_frequencyMU/'
    xml_des_dir = '../COSSY_train_new_back/Market1_frequencyMU/'
    frequency_mixup(all_img_list, radius_imgs_dict, L2_distribution_sample,
                    generate_mixup_nums=2817,image_source_dir=image_source_dir,
                    xml_source_dir=xml_source_dir, image_des_dir=image_des_dir,
                    xml_des_dir = xml_des_dir,
                    xml_prefix='mix_',method='cutup')
