'''
要求鱼眼图像中width和height大小相等
 1. 需要获取xml文件中的旋转框信息
 2. 需要获取其他数据集中旋转框的信息
 xml_data_info中数据元素为字典，每一个字典表示一幅图，其中字典的key有三个
 分别为：filename
        width
        height
        boxes
 其中boxes的为二维列表，list[0]表示为1个框的信息：包括difficult,cx,cy,w,h,angle
 xml_data_boxes为所有数据的boxes信息，为二维列表，其中list[0]表示为1个框的信息，包括difficult,cx/width,cy/height,w/width,h/height,angle,area/width*height，中心点L1距离/(根号2*height/2)，中心点L2距离/(height/2)
 将信息整理成 width,height,cx,cy,w,h,angle,difficult,area,中心点L1距离，中心点L2距离
'''

import xml.etree.ElementTree as ET
import numpy as np
import copy
import os



def get_center_distance(x,y,ct_x,ct_y,**kwargs):
    '''
    Distance between a box center and the image center.

    :param x: box center x coordinate (pixels)
    :param y: box center y coordinate (pixels)
    :param ct_x: image center x coordinate (pixels)
    :param ct_y: image center y coordinate (pixels)
    :param kwargs: distType -- 'L1' (Manhattan) or 'L2' (Euclidean), required
    :return: float pixel distance
    :raises ValueError: if distType is missing or not 'L1'/'L2'
    '''
    dist_type = kwargs.get('distType', None)
    # raise instead of assert: asserts are stripped under `python -O`
    if dist_type not in ('L1', 'L2'):
        raise ValueError('distType must be L1/L2')
    dx = x - ct_x
    dy = y - ct_y
    if dist_type == 'L2':
        return float(np.hypot(dx, dy))
    return float(abs(dx) + abs(dy))

# normalization=True scales box data by the image size; False keeps raw pixel values.
def get_one_xml_data(path,normalization=False):
    '''
    Parse one rotated-box annotation XML file.

    :param path: path to the XML file (with <robndbox> elements; child order
        assumed to be cx, cy, w, h, angle -- TODO confirm against the labeling tool)
    :param normalization: if True, normalize coordinates/sizes/distances in
        one_xml_boxes by the image dimensions (see inline comments)
    :return: (one_xml_info, one_xml_boxes)
        one_xml_info: dict with keys filename, width, height, boxes where
            boxes holds the raw rows [difficult, cx, cy, w, h, angle]
        one_xml_boxes: the same rows extended in place to
            [difficult, cx, cy, w, h, angle, area, L1 dist, L2 dist]
    '''
    tree = ET.parse(path)
    root = tree.getroot()
    one_xml_info = dict()       # collects the filename, size and object entries of the XML
    one_xml_boxes_origin = list()       # raw box rows of this single image
    one_xml_boxes = list()
    for child in root:
        if (child.tag == 'filename'):
            one_xml_info['filename'] = child.text
            print(child.text)
        elif (child.tag == 'size'):
            for item in child:
                if item.tag == 'width':
                    one_xml_info['width'] = int(item.text)
                elif item.tag == 'height':
                    one_xml_info['height'] = int(item.text)
            assert one_xml_info['width'] == one_xml_info['height'],'image must resize to makesure width == height!'
        elif (child.tag == 'object'):      # one annotated object
            one_xml_box = list()
            for item in child:   # type,name,pose,truncated,difficult,robndbox.
                if item.tag == 'difficult':
                    one_xml_box.append(float(item.text))
                elif item.tag == 'robndbox':
                    for box in item:
                        one_xml_box.append(float(box.text))
            one_xml_boxes_origin.append(copy.deepcopy(one_xml_box))
            one_xml_box.append(copy.deepcopy(one_xml_box[3])*copy.deepcopy(one_xml_box[4]))  # append area = w * h
            one_xml_box.append(get_center_distance(one_xml_box[1],one_xml_box[2],
                                                one_xml_info['width']/2.0,one_xml_info['height']/2.0,
                                                distType = 'L1'))  # append L1 distance to the image center
            one_xml_box.append(get_center_distance(one_xml_box[1], one_xml_box[2],
                                                   one_xml_info['width']/2.0, one_xml_info['height'] / 2.0,
                                                   distType='L2'))  # append L2 distance to the image center
            one_xml_boxes.append(one_xml_box)
    one_xml_info['boxes'] = one_xml_boxes_origin       # only the first 6 items: difficult,cx,cy,w,h,angle
    # Normalize: cx/width, cy/height, w/width, h/height; L1 by sqrt(2)*width/2
    # (its maximum on the circle inscribed in a square fisheye image); L2 by width.
    if normalization:
        width = one_xml_info['width']
        height = one_xml_info['height']
        for one_xml_box in one_xml_boxes:
            one_xml_box[1] = one_xml_box[1]/width    # normalize cx
            one_xml_box[2] = one_xml_box[2]/height   # normalize cy
            one_xml_box[3] = one_xml_box[3]/width    # normalize w
            one_xml_box[4] = one_xml_box[4]/height   # normalize h
            one_xml_box[7] = one_xml_box[7]/width*np.sqrt(2) # normalize L1, i.e. L1/(sqrt(2)*width/2)
            one_xml_box[8] = one_xml_box[8]/width    # NOTE(review): max center L2 on the circle is width/2, so this tops out at 0.5 -- confirm intended scale
    return one_xml_info,one_xml_boxes
#a,b = get_one_xml_data('/home/zsq/Project/dataset/141.xml',normalization=False)
#xml_data_info.append(a)
#[xml_data_boxes.append(i) for i in b]

data_root_path = '/home/zsq/Project/dataset/'
def get_dir_xml_data(root_path,normalization=False):
    '''
    Read every .xml annotation file directly under root_path.

    :param root_path: directory containing the xml annotation files
    :param normalization: forwarded to get_one_xml_data
    :return: (xml_data_info, xml_data_boxes)
        xml_data_info: list of per-image info dicts
        xml_data_boxes: all box rows of all files, with the leading
            'difficult' flag stripped from each row
    '''
    data_format = '.xml'
    xml_data_info = list()   # one info dict per image
    xml_data_boxes = list()  # all boxes of all xml files
    for file in os.listdir(root_path):
        if file.endswith(data_format):
            one_xml_info, one_xml_boxes = get_one_xml_data(os.path.join(root_path, file), normalization=normalization)
            xml_data_info.append(one_xml_info)
            print('文件:'+one_xml_info['filename']+',人数:%d'%(len(one_xml_boxes)))
            # drop the leading 'difficult' flag from every row
            # (extend replaces the former side-effect list comprehension)
            xml_data_boxes.extend(one_xml_box[1:] for one_xml_box in one_xml_boxes)

    return xml_data_info,xml_data_boxes
'''
if __name__ == '__main__':
    data_root_path = '/home/zsq/Project/dataset/'
    xml_data_info,xml_data_boxes = get_dir_xml_data(data_root_path)
    print('over')
#    pass
'''
# 以上是针对文件单独存储的xml来说的,接下来是读取json文件的内容
import json
from collections import defaultdict

def get_json_data(path,normalization=False,write_to_txt=False):
    '''
    Parse one COCO-style json annotation file.

    :param path: path to the .json annotation file
    :param normalization: if True normalize coordinates/sizes/distances by
        the image size (distances only when the image is square)
    :param write_to_txt: if True dump every row to <json stem>.txt placed in
        the json file's directory
    :return: (one_file_info, one_file_boxes)
        one_file_info: list of dicts with keys filename/width/height/boxes
            (filename holds the image id from the json)
        one_file_boxes: list of rows: the raw 'bbox' values followed by
            area, L1 dist, L2 dist, h/w -- the txt writer assumes 9 columns,
            i.e. a 5-element bbox [cx, cy, w, h, angle]; TODO confirm schema
    '''
    one_file_info = list()   # per-image info dicts
    one_file_boxes = list()  # all box rows of this file

    one_img_box = defaultdict(list)  # image_id -> list of its annotations

    print(f'Loading annotations {path} into memory...')
    with open(path, 'r') as f:
        json_data = json.load(f)
    # group annotations by image id
    for ann in json_data['annotations']:
        img_id = ann['image_id']
        one_img_box[img_id].append(ann)
    for img in json_data['images']:
        one_img_info = dict()
        img_id = img['id']
        one_img_info['filename'] = img_id
        one_img_info['width'] = img['width']
        one_img_info['height'] = img['height']
        anns = one_img_box[img_id]
        # keep only boxes of category 1 (presumably 'person' -- verify against the json)
        one_img_box[img_id] = [a['bbox'] for a in anns if a['category_id']==1]
        one_img_info['boxes'] = copy.deepcopy(one_img_box[img_id])
        width = img['width']     # hoisted: invariant per image
        height = img['height']
        for box in one_img_box[img_id]:
            box.append(box[2] * box[3])  # append area = w * h
            box.append(get_center_distance(box[0], box[1],
                                            width / 2.0, height / 2.0,
                                            distType='L1'))  # append L1 distance to image center
            box.append(get_center_distance(box[0], box[1],
                                            width / 2.0, height / 2.0,
                                            distType='L2'))  # append L2 distance to image center
            box.append(box[3] / (box[2] + 1e-16))  # append h/w; epsilon guards division by zero
            # flag degenerate (zero-sized) boxes so the offending image can be found
            if box[2] == box[3] == 0:
                print(img_id)
            if normalization:
                box[0] = box[0] / width   # normalize x
                box[1] = box[1] / height  # normalize y
                box[2] = box[2] / width   # normalize w
                box[3] = box[3] / height  # normalize h
                if width == height:       # distances are only normalized for square images
                    box[6] = box[6] / width * np.sqrt(2)  # normalize L1 by sqrt(2)*width/2
                    box[7] = box[7] / width  # NOTE(review): max center L2 is width/2, so this tops out at 0.5 -- confirm scale
        one_file_info.append(one_img_info)
        one_file_boxes.extend(one_img_box[img_id])
    # Write once after all images are processed (the old code truncated and
    # rewrote the file inside the per-image loop -- same final content, O(n^2) work).
    if write_to_txt:
        # saved next to the json file, named <json stem>.txt (os.path is portable,
        # unlike splitting on '/')
        base = os.path.splitext(os.path.basename(path))[0]
        txt_file = os.path.join(os.path.dirname(path), base + '.txt')
        with open(txt_file, 'w') as f:
            for box in one_file_boxes:
                row_txt = '{} {} {} {} {} {} {} {} {}'.format(box[0], box[1], box[2], box[3],box[4],box[5],box[6],box[7],box[8])
                f.write(row_txt)
                f.write('\r\n')      # Windows-style line ending
                #f.write('\n')        # Ubuntu

    return one_file_info, one_file_boxes
# 读取1个json文件
#one_file_info,one_file_boxes = get_json_data('../COSSY/annotations/Edge_cases.json',write_to_txt=True)
#print('over')

def get_dir_json_data(root_path,normalization=False):
    '''
    Read every .json annotation file directly under root_path.

    :param root_path: directory containing the json annotation files
    :param normalization: forwarded to get_json_data
    :return: (json_data_info, json_data_boxes)
        json_data_info: list of per-image info dicts from all files
        json_data_boxes: all box rows from all files
    '''
    data_format = '.json'
    json_data_info = list()   # per-image info dicts across all files
    json_data_boxes = list()  # all box rows across all files
    for file in os.listdir(root_path):
        if file.endswith(data_format):
            one_json_info, one_json_boxes = get_json_data(os.path.join(root_path, file), normalization=normalization)
            # extend replaces the former side-effect list comprehensions
            json_data_info.extend(one_json_info)
            json_data_boxes.extend(one_json_boxes)
    for info in json_data_info:
        # str(): 'filename' holds the json image id, which may be an int --
        # plain '+' concatenation would raise TypeError in that case
        print('文件:' + str(info['filename']) + ',人数:%d' % (len(info['boxes'])))
    print('total人数:%d'%(len(json_data_boxes)))
    return json_data_info,json_data_boxes
'''
if __name__ == '__main__':
    data_root_path = '../COSSY/test/'
    json_data_info,json_data_boxes = get_dir_json_data(data_root_path,False)
'''
# 以上是针对json存储文件而言的。

# Visualization of the collected box statistics.
import matplotlib.pyplot as plt
plt.rcParams['savefig.dpi'] = 300 # resolution (dpi) of saved figures
plt.rcParams['figure.dpi'] = 300 # resolution (dpi) of displayed figures
def statistics_content(boxes,img_together=True,**kwargs):
    '''
    Draw scatter plots relating box center distance (L1/L2) to h/w ratio and area.

    :param boxes: 2-D sequence; each row's last four entries must be
        [area, L1 distance, L2 distance, h/w] -- earlier columns are ignored
    :param img_together: True -> the four plots become 2x2 subplots of one
        figure; False -> one separate figure per plot
    :param kwargs: json_file -- label used in the plot titles (default 'Total')
    :return: None; figures are built but not shown (the caller invokes plt.show())
    '''
    json_file = kwargs.get('json_file', 'Total')
    boxes = np.array(boxes)

    area = boxes[:, -4]
    L1_dist = boxes[:, -3]
    L2_dist = boxes[:, -2]  # L2 center distance (x axis)
    h_w = boxes[:, -1]  # h/w ratio (y axis)
    # plt.imshow(L2_dist,h_w)

    plt.figure()
    # L2 distance
    # L2 distance vs h/w
    if img_together:
        plt.subplot(221)
    # plt.plot(L2_dist, h_w,'.')
    plt.scatter(L2_dist, h_w, s=0.1)
    plt.axhline(y=1, color='r', linestyle='-', label='y=1')  # reference line where h == w
    plt.xlabel('L2 dist', loc='center')
    plt.ylabel('h/w', loc='center')
    plt.legend()
    plt.title(f'{json_file}:L2 dist and h/w', loc='center')
    # L2 distance vs area
    if img_together:
        plt.subplot(222)
    else:
        plt.figure()
    # plt.plot(L2_dist, area,'.')
    plt.scatter(L2_dist, area, s=0.1)
    plt.xlabel('L2 dist', loc='center')
    plt.ylabel('area', loc='center')
    plt.title(f'{json_file}:L2 dist and area', loc='center')

    # L1 distance
    # L1 distance vs h/w
    if img_together:
        plt.subplot(223)
    else:
        plt.figure()
    # plt.plot(L1_dist, h_w,'.')
    plt.scatter(L1_dist, h_w, s=0.1)
    plt.axhline(y=1, color='r', linestyle='-', label='y=1')  # reference line where h == w
    plt.xlabel('L1 dist', loc='center')
    plt.ylabel('h/w', loc='center')
    plt.legend()
    plt.title(f'{json_file}:L1 dist and h/w', loc='center')
    # L1 distance vs area
    if img_together:
        plt.subplot(224)
    else:
        plt.figure()
    # plt.plot(L1_dist, area,'.')
    plt.scatter(L1_dist, area, s=0.1)
    plt.xlabel('L1 dist', loc='center')
    plt.ylabel('area', loc='center')
    plt.title(f'{json_file}:L1 dist and area', loc='center')
    if img_together:
        plt.suptitle(f'{json_file}', fontsize=12)
def draw_statistics_figure(root_path,all_json_files,concat=True,img_together=True,**kwargs):
    '''
    Plot box statistics for the selected json annotation files.

    :param root_path: directory holding the json files
    :param all_json_files: file names (with .json extension) to include
    :param concat: True -> merge all selected files into one set of plots
        (drawn once after the loop); False -> one set of plots per file
    :param img_together: forwarded to statistics_content (2x2 subplots vs
        separate figures)
    :param kwargs: json_file -- title label, used when concat is True
    :return: None; ends with plt.show()
    '''
    json_file = kwargs.get('json_file',None)
    data_format = '.json'
    all_boxes = list()  # accumulated rows of every selected file (concat mode)
    for file in os.listdir(root_path):
        if file.endswith(data_format) and file in all_json_files:
            boxes_info,boxes = get_json_data(os.path.join(root_path,file),write_to_txt=False)
            if concat:
                # extend replaces the former side-effect list comprehension
                all_boxes.extend(boxes)
                continue
            # per-file mode: title each plot with the file stem
            json_file = file.split('.')[0]
            statistics_content(boxes,img_together=img_together,json_file=json_file)
    if concat:
        statistics_content(all_boxes, img_together=img_together, json_file=json_file)
    plt.show()
'''
    分开每一个json文件绘制一张图片
'''
# Disabled driver: draw a separate figure for each json file.
if 0:
    all_json_files = ['Edge_cases.json','MW.json','Meeting1.json','Meeting2.json','Lunch2.json','Lunch3.json','Market1.json','Market2.json','Lab1.json','Lab2.json','Lunch1.json','Activity.json','IRill.json','IRfilter.json']
    #all_json_files = ['Market1.json','Market2.json','Activity.json']
    #all_json_files = ['Meeting1.json','Meeting2.json','Lab1.json', 'Lab2.json', 'MW.json','Market1.json','Market2.json']   # the data in these files satisfies h > w
    #all_json_files = ['Meeting1.json']
    #all_json_files = ['Lunch1.json', 'Lunch2.json','Lunch3.json', 'Edge_cases.json', 'IRill.json','IRfilter.json', 'Activity.json']
    draw_statistics_figure('../COSSY/annotations',all_json_files,concat=False,img_together=True,json_file='others_datasets')

'''
    将我们的数据集 Market1和Market2 整合起来绘制
    将其他数据集整合起来绘制
'''
# Disabled driver: plot our datasets (Market1/Market2) merged into one combined
# figure; the commented call does the same for the remaining datasets.
if 0:
    ours_json_files = ['Market1.json','Market2.json']
    others_json_files = ['Edge_cases.json','MW.json','Meeting1.json','Meeting2.json','Lunch2.json','Lunch3.json',\
                         'Lab1.json','Lab2.json','Lunch1.json','Activity.json','IRill.json','IRfilter.json']
    draw_statistics_figure('../COSSY/annotations', ours_json_files, concat=True, img_together=True,
                           json_file='ours_datasets')
    #draw_statistics_figure('../COSSY/annotations',others_json_files,concat=True,img_together=True,\
    #                      json_file='others_datasets')
