'''
  data:20201102
  本周任务：进行数据融合，保证沿中心点距离的框近似服从一个均匀分布
  思考过程：
    我们需要得到一个沿着半径中心点近似均匀分布的图像，由于我们的统计是对box来做的，
    并且我们的衡量指标为box的均匀分布，也就是人的均匀分布。因此，目前我的一个想法就是：
    将图像扩充到box数量上，也就是：比如一张图像中有3个box，那么我们将一张图像扩充到3张，
    同理，框的数量越多，则扩充后的图像数量越多，我们应该考虑到这些图像之间应该存在着较大的差异。

    我们打算的一个前提假设是：鱼眼数据中所有图中的人数近似服从一个均匀分布的
    我的打算是：在代码中没有使用到Fancy PCA，可以使用Fancy PCA对数据进行增强。
    同时对图像进行特定程度的旋转增强，其中进行特定程度旋转增强的方式考虑到两种：假定我们要对1.jpg(包含3个框，分别对应1号框，2号框和3号框，每个框对应着一幅图像)做变换。
    * 进行旋转增强时，保证得到的3张图片之间的角度差异最大，此时我们需要分别进行0°，120°，240°旋转得到3张图像。
    * 进行旋转增强时，考虑到我们需要保证box中心点和沿图像中心点水平方向[0,2$\pi$]夹角服从均匀分布，
      我们首先对数据进行直方图均衡，然后按照均衡得到的1号框，2号框，和3号框的变换后角度，对原始图像分别进行进行一个
      角度旋转变换，此时对于1号框，2号框以及3号框的旋转角度变化不大，因此为了增加差异性，我们进行PCA变换。
  接下来是实现过程：我想的是两种方法在同一个框架下实现：
    1.首先，要得到每一幅图像的变换角度列表[]
        首先获取图像，以及bbox列表，其中bbox列表中应该包含和中心点和夹角
    2.其次，对图像进行旋转变换

    进行fancy PCA的高斯分布std从[0,1]之间均匀采样
'''
from tools.get_data_info import get_json_data
import copy
import math

# Build a flat list of per-box records.  Each element is a dict describing one
# (image, box) pair: file_name, all bboxes of the image, the box's angle around
# the image centre, and the rotation angle (ro_angle) computed below.
imgs_boxes = list()
one_file_info,_ = get_json_data('../COSSY_train_shao2/annotations/Market2.json',write_to_txt=False)
one_file_boxes_angle = list()          # angles of every box in the whole file
import numpy as np

for img in one_file_info:
    img_cx = img['width'] // 2          # image centre (integer pixel coords)
    img_cy = img['height'] // 2
    n_boxes = len(img['boxes'])
    for box_index, box in enumerate(img['boxes']):
        # Angle of the box centre (box[0], box[1] = cx, cy) around the image
        # centre.  atan2 is in [-pi, pi]; map to degrees in [0, 360).
        angle = math.atan2((box[1] - img_cy), (box[0] - img_cx))
        angle = angle / math.pi * 180 if (angle >= 0) else (2 * math.pi + angle) / math.pi * 180
        one_file_boxes_angle.append(angle)
        img_box = {
            'width': img['width'],
            'height': img['height'],
            'filename': img['filename'],
            'bbox': img['boxes'],        # keep ALL boxes of the image
            'angle': angle,
            'ro_angle': None,            # filled in after histogram equalisation
            'box_index': box_index,
            'max_box_index': n_boxes,
            # Fancy-PCA Gaussian std grows linearly with the box index, in (0, 1]
            'gaussian_std': float((box_index + 1) / n_boxes),
        }
        # deepcopy so each record owns an independent copy of the boxes list
        imgs_boxes.append(copy.deepcopy(img_box))
one_file_boxes_angle.sort()     # sorted, so an angle's rank is its empirical-CDF position
total_boxes = len(one_file_boxes_angle)
# Histogram equalisation: map each angle to rank / N, rescaled to [0, 360).
# The upper integration bound is never reached.
# A first-occurrence rank dict replaces list.index, turning the accidental
# O(n^2) lookup into O(n); duplicates get the first occurrence's rank,
# exactly like list.index did.
_angle_rank = {}
for _rank, _a in enumerate(one_file_boxes_angle):
    _angle_rank.setdefault(_a, _rank)
for img_box in imgs_boxes:
    img_box['ro_angle'] = float(_angle_rank[img_box['angle']] / total_boxes) * 2 * 180

##########
# 对图像进行旋转
import torchvision.transforms.functional as tvf
from PIL import Image
import torch
def rotate(image, degrees, labels, expand=False):
    '''
    Rotate an image by `degrees` and transform its rotated-box labels to match.

    image:   PIL.Image
    degrees: rotation angle in degrees
    labels:  tensor, shape (N, 5) -- absolute cx, cy, w, h, angle in degrees.
             NOTE: modified in place and also returned.
    expand:  passed to torchvision rotate (grow the canvas to fit the result)

    Returns (rotated_image, transformed_labels).
    '''
    img_w, img_h = image.width, image.height
    image = tvf.rotate(image, angle=-degrees, expand=expand)
    new_w, new_h = image.width, image.height
    # image coordinates -> cartesian (origin at image centre, y pointing up)
    x = labels[:, 0] - 0.5 * img_w
    y = -(labels[:, 1] - 0.5 * img_h)
    # cartesian -> polar.  atan2 is quadrant-aware and returns 0 at the origin,
    # replacing the old manual atan fix-up (+pi for x<0) and the NaN patch;
    # it agrees with the old code modulo 2*pi, which cos/sin cannot tell apart.
    r = (x.pow(2) + y.pow(2)).sqrt()
    theta = torch.atan2(y, x)
    # rotate the box centres by the same amount as the image
    theta -= (degrees * np.pi / 180)
    # polar -> cartesian -> image coordinates of the (possibly resized) image
    x = r * torch.cos(theta)
    y = r * torch.sin(theta)
    labels[:, 0] = x + 0.5 * new_w
    labels[:, 1] = -y + 0.5 * new_h
    # box orientation follows the rotation; wrap into [-90, 90)
    labels[:, 4] += degrees
    labels[:, 4] = torch.remainder(labels[:, 4], 180)
    labels[:, 4][labels[:, 4] >= 90] -= 180

    return image, labels
def fancy_pca(img, alpha_std=0.1):
    '''
    'Fancy PCA' colour augmentation from Krizhevsky et al. (AlexNet):
    http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf

    Perform PCA on the RGB pixel values of the image and add to every pixel
    multiples of the principal components, with magnitudes proportional to the
    corresponding eigenvalues times a random variable alpha ~ N(0, alpha_std).
    As in the paper, alpha is drawn once per call (per augmented image), not
    once per channel.

    INPUTS:
    img:       numpy array of shape (h, w, 3), values 0-255 (uint8-like)
    alpha_std: std of the Gaussian alpha; the paper used 0.1.
               alpha_std == 0 returns the image unchanged.

    RETURNS:
    numpy uint8 array of the same shape, values 0-255.
    '''
    # Work in normalised [0, 1] RGB so the perturbation lives on the same
    # scale as the eigenvalues computed below.
    img_norm = img.astype(float) / 255.0

    # flatten image to rows of RGB and centre the channels
    img_rs = img_norm.reshape(-1, 3)
    img_centered = img_rs - np.mean(img_rs, axis=0)

    # 3x3 covariance matrix of the RGB values
    img_cov = np.cov(img_centered, rowvar=False)

    # eigh returns eigenvalues in ASCENDING order; flip values and the
    # corresponding eigenvector columns to descending in one step (this
    # replaces the old reversed-view argsort/in-place-sort trickery).
    eig_vals, eig_vecs = np.linalg.eigh(img_cov)
    eig_vals = eig_vals[::-1]
    eig_vecs = eig_vecs[:, ::-1]

    # alpha is drawn only once per augmentation (not once per channel)
    alpha = np.random.normal(0, alpha_std)

    # [p1, p2, p3] @ [a*l1, a*l2, a*l3]^T  -> per-channel offset, shape (3,)
    # (plain arrays + @ replace the deprecated np.matrix)
    add_vect = eig_vecs @ (alpha * eig_vals)

    # BUG FIX: the previous version added this normalised-scale offset to the
    # 0-255 image, so it rounded away to (almost) nothing after the uint8
    # cast and the augmentation was effectively a no-op.  Add it in
    # normalised space, clip, then rescale back to 0-255.
    out = np.clip(img_norm + add_vect, 0.0, 1.0) * 255.0
    # round (not truncate) so alpha == 0 reproduces the input exactly
    return np.rint(out).astype(np.uint8)

import os
root_path = '../COSSY_train_shao2/Market2/'
dest_path = '../COSSY_train_shao2/Market2_enhance/'
ro_imgs_boxes = list()
if not os.path.exists(dest_path):
    os.mkdir(dest_path)
# Variant 1: rotate according to the histogram-equalised angle distribution,
# apply Fancy PCA, and save each image plus a roLabelImg-style VOC XML file.
from tqdm import tqdm
import xml.etree.ElementTree as ET   # hoisted: was re-imported every iteration


def _write_voc_xml(box_record, out_dir):
    # Serialise one record as a rotated-box ('robndbox') VOC XML annotation.
    # (Avoids the old copy-pasted builder that shadowed the builtins
    # `object`, `type`, `filename` and `path`.)
    annotation = ET.Element('annotation')
    ET.SubElement(annotation, 'folder').text = 'video_mat'
    ET.SubElement(annotation, 'filename').text = box_record['filename']
    ET.SubElement(annotation, 'path').text = box_record['filename'] + '.jpg'
    source = ET.SubElement(annotation, 'source')
    ET.SubElement(source, 'database').text = 'Unknown'
    size = ET.SubElement(annotation, 'size')
    ET.SubElement(size, 'width').text = str(box_record['width'])
    ET.SubElement(size, 'height').text = str(box_record['height'])
    ET.SubElement(size, 'depth').text = '3'
    ET.SubElement(annotation, 'segmented').text = '0'
    for box in box_record['bbox']:
        obj = ET.SubElement(annotation, 'object')
        ET.SubElement(obj, 'type').text = 'robndbox'
        ET.SubElement(obj, 'name').text = '1'
        ET.SubElement(obj, 'pose').text = 'Unspecified'
        ET.SubElement(obj, 'truncated').text = '0'
        ET.SubElement(obj, 'difficult').text = '0'
        robndbox = ET.SubElement(obj, 'robndbox')
        ET.SubElement(robndbox, 'cx').text = str(box[0])
        ET.SubElement(robndbox, 'cy').text = str(box[1])
        ET.SubElement(robndbox, 'w').text = str(box[2])
        ET.SubElement(robndbox, 'h').text = str(box[3])
        # the labelling tool stores the box angle in radians
        ET.SubElement(robndbox, 'angle').text = str(box[4] / 180 * math.pi)
    tree = ET.ElementTree(annotation)
    tree.write(os.path.join(out_dir, box_record['filename'] + '.xml'), encoding='utf-8')


if 1:
    for img_box in tqdm(imgs_boxes):
        img = Image.open(os.path.join(root_path, img_box['filename'] + '.jpg'))
        # rotate by the difference between the equalised and the original angle
        ro_img, ro_label = rotate(img,
                                  (img_box['ro_angle'] - img_box['angle']),
                                  labels=torch.tensor(img_box['bbox']))
        ro_filename = img_box['filename'].split('_')[-1] + '_' + str(img_box['box_index'])
        ro_img_box = {
            'width': img_box['width'],
            'height': img_box['height'],
            'filename': ro_filename,
            'bbox': list(np.array(ro_label.data)),
        }
        ro_imgs_boxes.append(ro_img_box)
        # Fancy-PCA colour augmentation; the std grows with the box index so
        # the several copies of one image differ from each other.
        # numpy array <-> PIL image: https://blog.csdn.net/qq_30159015/article/details/80070514
        ro_img = fancy_pca(np.array(ro_img), img_box['gaussian_std'])
        ro_img = Image.fromarray(ro_img.astype('uint8')).convert('RGB')
        ro_img.save(os.path.join(dest_path,
                                 ro_filename + '.jpg'))
        # save the matching per-image XML annotation
        _write_voc_xml(ro_img_box, dest_path)

# Variant 2 (DISABLED by `if 0:`): instead of histogram equalisation, rotate so
# the copies of one image are spread as far apart as possible — the k-th copy
# of an image with N boxes is rotated by k/N * 360 degrees (e.g. 0/120/240 for
# three boxes).  Kept for comparison with the variant above.
if 0:
    for img_box in imgs_boxes:
        ro_img_box = dict()
        img = Image.open(os.path.join(root_path,img_box['filename']+'.jpg'))
        # evenly spaced rotation: box_index / max_box_index of a full turn
        ro_angle = 360 * (img_box['box_index'] / img_box['max_box_index'])
        print('旋转角度为:{}'.format(ro_angle))
        ro_img,ro_label = rotate(img,ro_angle,labels=torch.tensor(img_box['bbox']))
        ro_filename = img_box['filename']+'_'+str(img_box['box_index'])
        ro_img_box['width'] = img_box['width']
        ro_img_box['height'] = img_box['height']
        ro_img_box['filename'] = ro_filename
        ro_img_box['bbox'] = list(np.array(ro_label.data))
        ro_imgs_boxes.append(ro_img_box)
        # Fancy-PCA colour augmentation on the rotated image
        # numpy array <-> PIL image: https://blog.csdn.net/qq_30159015/article/details/80070514
        ro_img = fancy_pca(np.array(ro_img),img_box['gaussian_std'])
        ro_img = Image.fromarray(ro_img.astype('uint8')).convert('RGB')
        print('高斯标准差为:{}'.format(img_box['gaussian_std']))
        ro_img.save(os.path.join(dest_path,
                                 ro_filename + '.jpg'))
        print(ro_img_box)
        # write one roLabelImg-style VOC XML annotation per augmented image
        import xml.etree.ElementTree as ET

        annotation = ET.Element('annotation')
        folder = ET.SubElement(annotation, 'folder')
        folder.text = 'video_mat'

        filename = ET.SubElement(annotation, 'filename')
        filename.text = ro_img_box['filename']

        path = ET.SubElement(annotation, 'path')
        path.text = ro_img_box['filename'] + '.jpg'

        source = ET.SubElement(annotation, 'source')

        database = ET.SubElement(source, 'database')
        database.text = 'Unknown'

        size = ET.SubElement(annotation, 'size')
        width = ET.SubElement(size, 'width')
        width.text = str(ro_img_box['width'])
        height = ET.SubElement(size, 'height')
        height.text = str(ro_img_box['height'])
        depth = ET.SubElement(size, 'depth')
        depth.text = '3'

        segmented = ET.SubElement(annotation, 'segmented')
        segmented.text = '0'

        for box in ro_img_box['bbox']:
            object = ET.SubElement(annotation, 'object')

            type = ET.SubElement(object, 'type')
            type.text = 'robndbox'

            name = ET.SubElement(object, 'name')
            name.text = '1'

            pose = ET.SubElement(object, 'pose')
            pose.text = 'Unspecified'

            truncated = ET.SubElement(object, 'truncated')
            truncated.text = '0'

            difficult = ET.SubElement(object, 'difficult')
            difficult.text = '0'

            robndbox = ET.SubElement(object, 'robndbox')

            cx = ET.SubElement(robndbox, 'cx')
            cx.text = str(box[0])
            cy = ET.SubElement(robndbox, 'cy')
            cy.text = str(box[1])
            w = ET.SubElement(robndbox, 'w')
            w.text = str(box[2])
            h = ET.SubElement(robndbox, 'h')
            h.text = str(box[3])
            angle = ET.SubElement(robndbox, 'angle')
            # the labelling tool expects the angle in radians
            angle.text = str(box[4]/180*math.pi)
        tree = ET.ElementTree(annotation)
        tree.write(os.path.join(dest_path, ro_img_box['filename'] + '.xml'), encoding='utf-8')



