'''
该文件创建于2020年11月4日
用于划分COSSY数据集：统计COSSY文件夹下所有鱼眼图子目录中的图片总量，按照8:1:1的比例划分训练集、验证集和测试集
'''
import os
import json
from collections import defaultdict
import numpy as np
import random
import shutil
import tqdm

datasets = ['Activity','Edge_cases', 'IRill','Lab2','Lunch2',
             'Market1','Market2','Meeting1','MW','IRfilter',
             'Lab1','Lunch1','Lunch3','Meeting2']
datasets_num_dict = dict()               # dataset name -> number of images
datasets_img_dict = defaultdict(list)    # dataset name -> list of valid image ids

annotations_path = '../COSSY_train/annotations/'
# Use the 'images' section of each annotation file to count the images per dataset.
for json_file in os.listdir(annotations_path):
    dataset = json_file.split('.')[0]
    if json_file.endswith('.json') and dataset in datasets:
        with open(os.path.join(annotations_path, json_file), 'r') as f:
            json_data = json.load(f)
        # Process outside the `with` block: the file handle is only needed for loading.
        datasets_num_dict[dataset] = len(json_data['images'])
        datasets_img_dict[dataset] = [img['id'] for img in json_data['images']]
print(datasets_num_dict)

# Total number of images across every dataset.
datasets_all_num = sum(datasets_num_dict.values())
print('total number of data:{}'.format(datasets_all_num))

# Shuffle the data and derive the train / validation / test splits.
datasets_seperate_dict = defaultdict(dict)
def shuffle_dadasets(datasets_img_dict, seperate_ratio=(0.8, 0.1, 0.1)):
    '''
    Shuffle each dataset's image-id list and split it into train,
    validation and test partitions.

    :param datasets_img_dict: mapping of dataset name -> list of image ids
    :param seperate_ratio: (train, val, test) fractions; the test split
                           receives whatever remains after train and val
                           (a tuple so the default cannot be mutated)
    :return: mapping of dataset name -> dict with keys
             'train_imgs', 'val_imgs' and 'test_imgs'
    '''
    # Build the result locally instead of filling the module-level
    # defaultdict: repeated calls would otherwise accumulate stale entries.
    result = defaultdict(dict)
    for i, name in enumerate(datasets_img_dict.keys()):
        imgs = datasets_img_dict[name]
        train_length = int(len(imgs) * seperate_ratio[0])
        val_length = int(len(imgs) * seperate_ratio[1])

        # Fixed per-dataset seed keeps every run's shuffle identical.
        random.seed(i)
        random.shuffle(imgs)

        result[name]['train_imgs'] = imgs[:train_length]
        result[name]['val_imgs'] = imgs[train_length:train_length + val_length]
        result[name]['test_imgs'] = imgs[train_length + val_length:]
    return result
# Compute the split once at import time (seeded shuffle, so reproducible).
datasets_seperate_dict = shuffle_dadasets(datasets_img_dict)
#print(datasets_seperate_dict)

# Directory layout: original data sources and the new split destination trees.
source_datasets_dir = '../COSSY_train/'
source_annotations_dir = '../COSSY_train/annotations/'
des_train_dir = '../COSSY_train_new/'
des_val_dir = '../COSSY_val_new/'
des_test_dir = '../COSSY_test_new/'
des_train_annotations_dir = '../COSSY_train_new/annotations/'
des_val_annotations_dir = '../COSSY_val_new/annotations/'
des_test_annotations_dir = '../COSSY_test_new/annotations/'
# Build annotation files for all data and split the images into the
# train / val / test directory trees at the same time.
def create_new_datasets(datasets_seperate_dict,
                        source_datasets_dir,
                        source_annotations_dir,
                        des_train_dir,
                        des_val_dir,
                        des_test_dir,
                        des_train_annotations_dir,
                        des_val_annotations_dir,
                        des_test_annotations_dir,
                        train_seperate=True,val_seperate=False,test_seperate=False,
                        defalut_total_name='total'):
    '''
    Copy images out of the original dataset tree (source_datasets_dir) and
    generate new annotation json files from the originals in
    source_annotations_dir, according to the split in datasets_seperate_dict.

    :param datasets_seperate_dict:    dataset name -> dict with 'train_imgs',
                                      'val_imgs' and 'test_imgs' image-id lists
    :param source_datasets_dir:       root directory of the original per-dataset image folders
    :param source_annotations_dir:    directory holding the original <dataset>.json files
    :param des_train_dir:             destination directory for training images
    :param des_val_dir:               destination directory for validation images
    :param des_test_dir:              destination directory for test images
    :param des_train_annotations_dir: destination directory for training annotations
    :param des_val_annotations_dir:   destination directory for validation annotations
    :param des_test_annotations_dir:  destination directory for test annotations
    :param train_seperate:            keep one folder / json per source dataset (default: True)
    :param val_seperate:              keep one folder / json per source dataset (default: False)
    :param test_seperate:             keep one folder / json per source dataset (default: False)
    :param defalut_total_name:        folder / file name used when a split is merged into one
    :return: None (images and json files are written to disk)
    '''
    # Recreate every destination folder from scratch.
    for dir in [des_train_dir,des_val_dir,des_test_dir,des_train_annotations_dir,
                des_val_annotations_dir,des_test_annotations_dir]:
        if not os.path.exists(dir):
            os.mkdir(dir)
        else:
            shutil.rmtree(dir)     # wipe the folder, then create it again
            os.mkdir(dir)
    # For merged splits, all images share one folder named defalut_total_name.
    if not train_seperate:
        os.mkdir(os.path.join(des_train_dir, defalut_total_name))# create directly
    if not val_seperate:
        os.mkdir(os.path.join(des_val_dir, defalut_total_name))# create directly
    if not test_seperate:
        os.mkdir(os.path.join(des_test_dir, defalut_total_name))# create directly
    # Accumulators for the new annotation files. For a merged split (its
    # seperate flag False) these keep growing across all datasets in the
    # main loop and are written out once afterwards.
    json_annotation =dict()
    train_annotations = list()
    train_images = list()
    val_annotations = list()
    val_images = list()
    test_annotations = list()
    test_images = list()
    # The label file also needs the single 'person' category entry.
    # NOTE(review): COCO-style categories normally use "name": "person";
    # this dict uses the key "person" instead — confirm downstream readers
    # expect this layout.
    json_annotation['categories'] = [
        {"supercategory": "person",
        "person": "person",
        "id": 1
        }
    ]
    # Destination image folders for the dataset currently being processed.
    train_path = ''
    val_path = ''
    test_path = ''
    for k in tqdm.tqdm(datasets_seperate_dict.keys()):
        if train_seperate:
            # Per-dataset output: reset the accumulators for this dataset.
            train_annotations = list()
            train_images = list()
            # Make the per-dataset sub-folder.
            os.mkdir(os.path.join(des_train_dir,k))     # create directly
            train_path = os.path.join(des_train_dir,k)
        else:
            train_path = os.path.join(des_train_dir, defalut_total_name)
        if val_seperate:
            val_annotations = list()
            val_images = list()
            os.mkdir(os.path.join(des_val_dir, k))      # create directly
            val_path = os.path.join(des_val_dir, k)
        else:
            val_path = os.path.join(des_val_dir, defalut_total_name)
        if test_seperate:
            test_annotations = list()
            test_images = list()
            os.mkdir(os.path.join(des_test_dir, k))      # create directly
            test_path = os.path.join(des_test_dir, k)
        else:
            test_path = os.path.join(des_test_dir, defalut_total_name)
        #train_list = datasets_seperate_dict[k]['train_imgs']
        val_list = datasets_seperate_dict[k]['val_imgs']
        test_list = datasets_seperate_dict[k]['test_imgs']
        source_datasets_img_dir = os.path.join(source_datasets_dir,k)
        with open(os.path.join(source_annotations_dir,k+'.json'), 'r') as f:
            json_data = json.load(f)
        # Route each annotation (and its image file) to the split that owns
        # its image id; anything not in val/test falls through to train.
        # NOTE(review): images are copied once per annotation, so an image
        # with several boxes is re-copied, and an image with no annotations
        # is never copied even though it is still listed in 'images' below
        # — confirm this is intended.
        for ann in tqdm.tqdm(json_data['annotations']):
            if ann['image_id'] in val_list:
                val_annotations.append(ann)
                shutil.copyfile(os.path.join(source_datasets_img_dir,ann['image_id']+'.jpg'),
                                os.path.join(val_path,ann['image_id']+'.jpg'))
            elif ann['image_id'] in test_list:
                test_annotations.append(ann)
                shutil.copyfile(os.path.join(source_datasets_img_dir, ann['image_id'] + '.jpg'),
                                os.path.join(test_path, ann['image_id'] + '.jpg'))
            else:
                train_annotations.append(ann)
                shutil.copyfile(os.path.join(source_datasets_img_dir, ann['image_id'] + '.jpg'),
                                os.path.join(train_path, ann['image_id'] + '.jpg'))
        # Route the image records the same way.
        for img in tqdm.tqdm(json_data['images']):
            if img['id'] in val_list:
                val_images.append(img)
            elif img['id'] in test_list:
                test_images.append(img)
            else:
                train_images.append(img)
        if train_seperate:
            json_annotation['annotations'] = train_annotations
            json_annotation['images'] = train_images
            # Write this dataset's training annotations to its own json file.
            with open(os.path.join(des_train_annotations_dir,k+'.json'), 'w') as f:
                f.write(json.dumps(json_annotation, indent=1, separators=(',', ':')))
        if val_seperate:
            json_annotation['annotations'] = val_annotations
            json_annotation['images'] = val_images
            # Write this dataset's validation annotations to its own json file.
            with open(os.path.join(des_val_annotations_dir,k+'.json'), 'w') as f:
                f.write(json.dumps(json_annotation, indent=1, separators=(',', ':')))
        if test_seperate:
            json_annotation['annotations'] = test_annotations
            json_annotation['images'] = test_images
            # Write this dataset's test annotations to its own json file.
            with open(os.path.join(des_test_annotations_dir,k+'.json'), 'w') as f:
                f.write(json.dumps(json_annotation, indent=1, separators=(',', ':')))
    # Merged splits are written once, after every dataset has been processed.
    if not train_seperate:
        json_annotation['annotations'] = train_annotations
        json_annotation['images'] = train_images
        # Write the merged training annotations to a single json file.
        with open(os.path.join(des_train_annotations_dir, defalut_total_name + '.json'), 'w') as f:
            f.write(json.dumps(json_annotation, indent=1, separators=(',', ':')))
    if not val_seperate:
        json_annotation['annotations'] = val_annotations
        json_annotation['images'] = val_images
        # Write the merged validation annotations to a single json file.
        with open(os.path.join(des_val_annotations_dir, defalut_total_name + '.json'), 'w') as f:
            f.write(json.dumps(json_annotation, indent=1, separators=(',', ':')))
    if not test_seperate:
        json_annotation['annotations'] = test_annotations
        json_annotation['images'] = test_images
        # Write the merged test annotations to a single json file.
        with open(os.path.join(des_test_annotations_dir, defalut_total_name + '.json'), 'w') as f:
            f.write(json.dumps(json_annotation, indent=1, separators=(',', ':')))
'''
create_new_datasets(datasets_seperate_dict,
                        source_datasets_dir,
                        source_annotations_dir,
                        des_train_dir,
                        des_val_dir,
                        des_test_dir,
                        des_train_annotations_dir,
                        des_val_annotations_dir,
                        des_test_annotations_dir)
'''
'''
    我们想在验证集中单独查看我们商场数据集的表现情况
    因此，我们从划分后的验证集中将Market1，Market2的数据单独挑选出来放在一个文件夹中，并将annotation放置在annotations目录下
'''
# Uses the des_val_dir / des_val_annotations_dir tree built by create_new_datasets.
def get_Market_from_val(val_img_dir,val_annotation_path,
                        new_img_dir='./Market',
                        new_annotation_path='./Market.json'):
    '''
    Extract the market (Market*) images and their annotations from the
    validation split and save them as a standalone dataset.

    :param val_img_dir:         directory holding the validation images
    :param val_annotation_path: path of the validation annotation json
    :param new_img_dir:         directory the selected market images are copied to
    :param new_annotation_path: path the selected market annotation json is written to
    :return: None (images and a json file are written to disk)
    '''
    # Recreate the output image folder from scratch.
    if os.path.exists(new_img_dir):
        shutil.rmtree(new_img_dir)
    os.mkdir(new_img_dir)
    # json dict holding the extracted market data.
    json_dict = dict()
    # NOTE(review): category layout mirrors the rest of this file
    # ("person" key instead of COCO's usual "name") — keep them in sync.
    json_dict['categories'] = [
        {"supercategory": "person",
         "person": "person",
         "id": 1
         }
    ]
    with open(val_annotation_path, 'r') as f:
        json_data = json.load(f)
    # Keep every annotation and image record belonging to a Market dataset.
    annotations = [ann for ann in json_data['annotations']
                   if ann['image_id'].startswith('Market')]
    images = [img for img in json_data['images']
              if img['id'].startswith('Market')]
    # Copy from the 'images' list (one copy per image) rather than per
    # annotation: this avoids re-copying an image once for every box it
    # contains, and also covers images that are listed in 'images' but
    # have no annotations.
    for img in images:
        src = os.path.join(val_img_dir, img['id'] + '.jpg')
        # Guard: create_new_datasets only copies images that have at least
        # one annotation, so an annotation-less image may be absent on disk.
        if os.path.exists(src):
            shutil.copyfile(src, os.path.join(new_img_dir, img['id'] + '.jpg'))
    json_dict['annotations'] = annotations
    json_dict['images'] = images
    with open(new_annotation_path, 'w') as f:
        json.dump(json_dict, f, indent=1, separators=(',', ':'))
'''
    数据从COSSY_train(对COSSY中鱼眼图像进行复制) 
    经过create_new_datasets函数
    变化到了 划分好的文件夹以及对应的annotations
    COSSY_train_new
    COSSY_val_new
    COSSY_test_new
'''
# Pull the Market subset out of the merged validation split produced above.
get_Market_from_val('../COSSY_val_new/total','../COSSY_val_new/annotations/total.json',
                        new_img_dir='../COSSY_val_new/Market',
                        new_annotation_path='../COSSY_val_new/annotations/Market.json')
# 验证 Market1.json 和 Market2.json 文件
'''
with open('../COSSY_train_new/annotations/Market2.json', 'r') as f:
    json_data = json.load(f)
    for img in json_data['images']:
        if not os.path.exists(os.path.join('../COSSY_train_new/Market2',img['id']+'.jpg')):
            print(img['id'])
'''
'''
    然后我将训练集中的Market1和Market2在绍峰那里经过mixup处理之后，
    构建新的训练数据集
    COSSY_train_shao
    接着我再将 COSSY_train_shao 在我自己这里进行角度直方图增强，使得Market1中所有框中心在图像四周分布均匀
    这个直接在COSSY_train_shao里面做处理，在目录下生成两个文件分别是 Market1_enhance 和 Market2_enhance
    这个就是我们最终本次的训练文件
    
'''
