#!/usr/bin/env python
# coding: utf-8
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
from collections import defaultdict
import glob
import json
import os
import os.path as osp
from posixpath import join
import sys
import shutil
import xml.etree.ElementTree as ET
from tqdm import tqdm
import re, time
from multiprocessing import Pool, Queue
from PIL import Image
import numpy as np
import PIL.ImageDraw
import cv2

# Cross-process queue: deal_via_once workers push their partial COCO dicts
# here and deal_via drains them back into one result.
data_queue = Queue()

class MyEncoder(json.JSONEncoder):
    """JSON encoder that serializes NumPy scalars and arrays as native
    Python ints / floats / lists."""

    def default(self, obj):
        # Translate the NumPy families json cannot handle on its own.
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        # Anything else: defer to the base class (raises TypeError).
        return super(MyEncoder, self).default(obj)


def getbbox(self, points):
    # NOTE(review): this looks like a method lifted out of a labelme
    # converter class — it takes `self` at module level and calls
    # self.polygons_to_mask / self.mask2box, neither of which is defined in
    # this file.  Nothing in this file calls it; it appears to be dead code
    # (the working equivalent is the module-level get_bbox below).
    polygons = points
    mask = self.polygons_to_mask([self.height, self.width], polygons)
    return self.mask2box(mask)


def images_labelme(data, num):
    """Build a COCO 'images' entry from a labelme json dict.

    `num` is a 0-based counter; the stored id is 1-based.
    """
    return {
        'height': data['imageHeight'],
        'width': data['imageWidth'],
        'id': num + 1,
        # Keep only the basename of the (possibly nested) image path.
        'file_name': data['imagePath'].split('/')[-1],
    }


def images_cityscape(data, num, img_file):
    """Build a COCO 'images' entry from a cityscape json dict.

    `num` is a 0-based counter; the stored id is 1-based.  The file name is
    taken from `img_file` rather than the json payload.
    """
    return {
        'height': data['imgHeight'],
        'width': data['imgWidth'],
        'id': num + 1,
        'file_name': img_file,
    }


def categories(label, labels_list):
    """Build a COCO 'categories' entry for a newly seen label.

    The id is the next 1-based slot after the labels already collected in
    `labels_list`.
    """
    return {
        'supercategory': 'component',
        'id': len(labels_list) + 1,
        'name': label,
    }


def annotations_rectangle(points, label, image_num, object_num, label_to_num):
    """COCO annotation for an axis-aligned rectangle.

    `points` carries four corners as [[x1,y1],[x2,y2],[x1,y2],[x2,y1]];
    rows 1 and 2 are swapped for the segmentation so the polygon corners
    come out in trace order.  Image/object ids are 1-based (`*_num` + 1).
    """
    # Reorder corners 0,2,1,3 — equivalent to swapping rows 1 and 2.
    corners = np.asarray(points)[[0, 2, 1, 3], :]
    x, y = points[0]
    w = points[1][0] - points[0][0]
    h = points[1][1] - points[0][1]
    bbox = [float(x), float(y), float(w), float(h)]
    return {
        'segmentation': [list(corners.flatten())],
        'iscrowd': 0,
        'image_id': image_num + 1,
        'bbox': bbox,
        'area': bbox[2] * bbox[3],
        'category_id': label_to_num[label],
        'id': object_num + 1,
    }


def annotations_polygon(height, width, points, label, image_num, object_num,
                        label_to_num):
    """COCO annotation for a free-form polygon.

    The bounding box is derived by rasterizing the polygon via get_bbox on a
    height x width canvas.  Image/object ids are 1-based (`*_num` + 1).
    """
    bbox = [float(v) for v in get_bbox(height, width, points)]
    return {
        'segmentation': [list(np.asarray(points).flatten())],
        'iscrowd': 0,
        'image_id': image_num + 1,
        'bbox': bbox,
        'area': bbox[2] * bbox[3],
        'category_id': label_to_num[label],
        'id': object_num + 1,
    }


def get_bbox(height, width, points):
    """Return [x, y, w, h] of the tightest axis-aligned box covering the
    polygon `points`, measured on a rasterized height x width mask.
    """
    # Rasterize the polygon (outline + fill) onto a zeroed canvas.
    canvas = PIL.Image.fromarray(np.zeros([height, width], dtype=np.uint8))
    PIL.ImageDraw.Draw(canvas).polygon(
        xy=list(map(tuple, points)), outline=1, fill=1)
    filled = np.argwhere(np.array(canvas, dtype=bool))
    rows, cols = filled[:, 0], filled[:, 1]
    top, left = np.min(rows), np.min(cols)
    bottom, right = np.max(rows), np.max(cols)
    return [left, top, right - left, bottom - top]


# 递归获取文件夹下所有符合条件的文件路径.
def get_all_filepath(root_dir_path, extension_tag=[], recursive = True):
    """
    Args:
        root_dir_path: 文件根目录.
        extension_tag: 文件后缀名组成的列表, Example: ["jpg", "png", "bmp"]
        如果为空 则获取所有
    Returns:
        特定文件夹下所有符合条件的文件路径.
    """
    def check_endswith(new_path, extension_tag):
        if len(extension_tag) == 0:
            return True
        for s in extension_tag:
            if new_path.lower().endswith(s):
                return True
        return False

    paths = []
    for f in os.listdir(root_dir_path):
        new_path = root_dir_path + os.sep + f
        if os.path.isfile(new_path) and check_endswith(new_path,  extension_tag):
            paths.append(new_path)
        elif os.path.isdir(new_path) and recursive:
            temp_list = get_all_filepath(new_path, extension_tag)
            paths.extend(temp_list)
    return paths


def own_label_to_int(label, dataset_name=None):
    """Map a raw annotation label to a (canonical_name, category_id) pair
    for the given dataset.

    Raises:
        FileExistsError: unknown label for the "road_vihicle" dataset.
        NotImplementedError: unknown label / unknown dataset elsewhere.
    """
    if dataset_name == "road_vihicle":
        table = {"car": 1, "van": 2, "bus": 3, "truck": 4,
                 "closed-tricycle": 5, "open-tricycle": 6, "forklift": 7}
        if label in table:
            return label, table[label]
        print("FileExistsError....")
        raise FileExistsError
    if dataset_name == "ct_meter":
        table = {"meter": 1, "switch": 2, "water": 3}
        if label in table:
            return label, table[label]
        raise NotImplementedError
    elif dataset_name == "boeing":
        # Everything that is not "other" counts as text.
        return ("other", 1) if label == "other" else ("text", 2)
    elif dataset_name == "lx_meter":
        if label == "rect":
            return "rect", 1
        raise NotImplementedError
    elif dataset_name == "egg":
        if label == "egg":
            return "egg", 1
        raise NotImplementedError
    elif dataset_name == "plate":
        # Unknown labels are signalled with (None, None) so the caller can
        # silently drop them.
        return ("plate", 1) if label == "plate" else (None, None)
    elif dataset_name == "handwriting":
        return "char", 1
    elif dataset_name == "car3":
        return "other", 1
    elif dataset_name == "car2":
        return ("person", 1) if label == "person" else ("other", 2)
    elif dataset_name == "car":
        table = {"person": 1, "motorcycle": 2, "car": 3, "ambulance": 4,
                 "bus": 5, "pickup": 6, "truck": 7, "big_truck": 8,
                 "other": 9}
        if label in table:
            return label, table[label]
        print("dataset_name {}, label {}".format(dataset_name, label))
        raise NotImplementedError
    elif dataset_name == "barcode":
        return "barcode", 1
    elif dataset_name == "fire":
        if label == "fire":
            return "fire", 1
        if label == "smoke":
            return "smoke", 2
        raise NotImplementedError
    elif dataset_name == "pedestrian":
        return "pedestrian", 1
    elif dataset_name == "ringelman":
        if label == "ringelman":
            return "ringelman", 1
        # All car variants collapse onto one "car" category.
        if label in ("scar", "bcar", "car"):
            return "car", 2
        print("dataset_name {}, label {}".format(dataset_name, label))
        raise NotImplementedError
    else:
        raise NotImplementedError

def deal_via(data_dir, image_name="images", 
                        dataset_name=None,
                        train_val_ratio=0.0,
                        via_name="via_region_data.json"):
    """Merge every VIA annotation file found under ``data_dir/image_name``
    into one COCO-style dict.

    Args:
        data_dir: dataset root directory.
        image_name: sub-directory under data_dir holding images + VIA jsons.
        dataset_name: label-map key forwarded to own_label_to_int.
        train_val_ratio: unused here (splitting is done by split_train_test).
        via_name: file name of the VIA annotation json files to look for.

    Returns:
        dict with 'images', 'categories', 'annotations' and 'blur_dict'.
    """
    image_dir = osp.join(data_dir ,image_name)
    via_files = get_all_filepath(image_dir, [via_name])
    
    categories_dict = {}
    images_list = []
    blur_dict = {}
    annotations_list = []
    train_data_coco = {}

    image_num = -1
    object_num = -1

    # One async worker per VIA file; each worker pushes its partial COCO
    # dict onto the module-level data_queue.
    # NOTE(review): sharing a multiprocessing.Queue with Pool workers this
    # way relies on the 'fork' start method — confirm on platforms that
    # default to 'spawn'.
    thread_count = 16
    p = Pool(thread_count)
    for via_file in via_files:

        # deal_via_once(data_dir, via_file, image_num, object_num, dataset_name)
        p.apply_async(deal_via_once, args=(data_dir, via_file, image_num, object_num, dataset_name))
        
        # Pre-scan the file locally only to advance the image/object id
        # offsets handed to the next worker.
        with open(via_file) as rf:
            data_dict = json.loads(rf.read())
        image_num += len(data_dict.keys())
        for _, v in data_dict.items():
            object_num += len(v["regions"])
    # p.close()
    # p.join()
    print("data_queue.qsize: ", data_queue.qsize())

    # Poll instead of Pool.join(): stop once every file has produced a
    # result, or when the queue has not grown for 60 seconds (crude stall
    # timeout for crashed workers).
    prev_cnt = 0
    prev_time = time.time()
    total_cnt = 0
    cur_time = time.time()
    while (True) :
        print("prev_cnt: ", prev_cnt, total_cnt)
        total_cnt = data_queue.qsize()
        if total_cnt == len(via_files):
            break
        else:
            time.sleep(1)
            
        cur_time = time.time()
        if total_cnt == prev_cnt:
            pass
        else:
            prev_cnt = total_cnt
            prev_time = cur_time

        if cur_time - prev_time > 60:
            break

    print("data_queue.qsize: ", data_queue.qsize())

    # Drain the queue and merge the partial results from every worker.
    for _ in range(data_queue.qsize()):
        one_data_coco = data_queue.get()
        annotations_list.extend(one_data_coco["annotations"])
        images_list.extend(one_data_coco["images"])
        blur_dict.update(one_data_coco["blur_dict"])
        # update count
        for k, v in one_data_coco["categories"].items():
            if categories_dict.get(k, None) is None:
                categories_dict[k] = v
            else:
                categories_dict[k]["count"] += v["count"]

    train_data_coco['images'] = images_list
    train_data_coco['categories'] = [v for _, v in categories_dict.items()]
    train_data_coco['annotations'] = annotations_list
    train_data_coco["blur_dict"]= blur_dict
    print("blur_dict:", blur_dict)

    return train_data_coco

def deal_via_once(data_root, via_path, image_num=-1, object_num=-1, dataset_name=""):
    """Convert a single VIA annotation json into a partial COCO dict and
    push it onto the module-level ``data_queue``.

    Args:
        data_root: dataset root; image paths in the output are relative to
            this directory.
        via_path: path of the via_region_data json to convert.
        image_num: image-id offset (ids start at image_num + 1).
        object_num: annotation-id offset (ids start at object_num + 1).
        dataset_name: label-map key forwarded to own_label_to_int.

    Returns:
        None.  The result dict ('images', 'categories', 'annotations',
        'blur_dict') is delivered via data_queue.
    """
    label_to_num = {}
    labels_list = []

    images_list = []
    blur_dict = {}
    categories_dict = {}
    annotations_list = []
    train_data_coco = {}

    image_count = image_num
    object_count = object_num

    print("via_path: ", via_path)
    with open(via_path) as rf:
        data_dict = json.loads(rf.read())
    data_dir = osp.dirname(via_path)

    index = 0
    for _, data in tqdm(data_dict.items()):
        index += 1
        # NOTE(review): only every third entry (index % 3 == 0) is kept —
        # this subsamples the dataset by 3x; confirm it is intentional.
        if index % 3 != 0: continue

        # Image path relative to data_root, and the absolute path on disk.
        file_name = osp.join(data_dir, data["filename"]).replace(data_root + os.sep, "")
        filepath = osp.join(data_root, file_name)
        if not osp.exists(filepath):
            print("not exist...", filepath)
            continue
        if len(data["regions"]) == 0:
            print("not regions...", filepath)
            continue

        pil_image = Image.open(filepath)
        w, h = pil_image.size
        if w == 0 or h == 0:
            print("w or h =0...", filepath)
            continue

        image_count += 1
        image_json = {
            "license": 0,
            "file_name": file_name,
            "coco_url": "",
            "height": pil_image.size[1],
            "width": pil_image.size[0],
            "date_captured": "",
            "flickr_url": "",
            "id": image_count
        }
        images_list.append(image_json)
        # Record per-image blur regions (a VIA file attribute) by image id.
        if len(data["file_attributes"].get("blur", [])) != 0:
            blur_dict[image_count] = data["file_attributes"]["blur"]

        for region in data["regions"]:

            label = region['region_attributes'].get("label", None)
            if label == "mask": continue
            if label == "retain":
                # keep as an unlabeled background image (no annotation)
                continue
            label, label_idx = own_label_to_int(label, dataset_name)
            if label is None: continue

            if label not in labels_list:
                label_to_num[label] = label_idx
                category = {
                    "supercategory": "component",
                    "id": label_idx,
                    "name": label,
                    "count": 0  # extra field used to count instances per class
                }
                categories_dict[label_idx] = category
                labels_list.append(label)

            shape_type = region['shape_attributes']["name"]
            if shape_type == "rect":
                x = region['shape_attributes']["x"]
                y = region['shape_attributes']["y"]
                w_ = region['shape_attributes']["width"]
                h_ = region['shape_attributes']["height"]
                # Bug fix: np.int was removed in NumPy 1.24 — use builtin int.
                points = np.array([[x, y], [x + w_, y],
                    [x + w_, y + h_], [x, y + h_]], dtype=int)
            elif shape_type == "polygon":
                points = np.array([[x, y] for x, y in zip(
                                region['shape_attributes']["all_points_x"],
                                region['shape_attributes']["all_points_y"])],
                                dtype=int)
            else:
                raise NotImplementedError
            object_count += 1
            categories_dict[label_idx]["count"] += 1

            annotation = {}
            annotation['segmentation'] = [list(np.asarray(points).flatten())]
            annotation['iscrowd'] = 0
            annotation['image_id'] = image_count
            annotation['bbox'] = list(map(float, get_bbox(h, w, points)))
            annotation['area'] = annotation['bbox'][2] * annotation['bbox'][3]
            annotation['category_id'] = label_to_num[label]
            annotation['id'] = object_count
            annotations_list.append(annotation)

    train_data_coco['images'] = images_list
    train_data_coco['categories'] = categories_dict
    train_data_coco['annotations'] = annotations_list
    train_data_coco["blur_dict"] = blur_dict
    data_queue.put(train_data_coco)

    return

def deal_json(ds_type, img_path, json_path):
    """Convert labelme / cityscape json annotations for the images found in
    ``img_path`` into a COCO-format dict.

    Args:
        ds_type: 'labelme' or 'cityscape'.
        img_path: directory holding the (already split) images.
        json_path: directory holding the per-image json annotation files.

    Returns:
        dict with 'images', 'categories' and 'annotations'.
    """
    # Bug fix: labels_list / categories_list / label_to_num were read below
    # without ever being defined anywhere (NameError at runtime).  They are
    # now initialized locally.  NOTE(review): if category ids must stay
    # consistent across the train/val/test calls in main(), these would need
    # to be hoisted to module level instead.
    labels_list = []
    categories_list = []
    label_to_num = {}

    data_coco = {}
    images_list = []
    annotations_list = []
    image_num = -1
    object_num = -1
    for img_file in os.listdir(img_path):
        img_label = os.path.splitext(img_file)[0]
        # Skip anything that is not an image file.
        if img_file.split('.')[
                -1] not in ['bmp', 'jpg', 'jpeg', 'png', 'JPEG', 'JPG', 'PNG']:
            continue
        label_file = osp.join(json_path, img_label + '.json')
        print('Generating dataset from:', label_file)
        image_num = image_num + 1
        with open(label_file) as f:
            data = json.load(f)
            if ds_type == 'labelme':
                images_list.append(images_labelme(data, image_num))
            elif ds_type == 'cityscape':
                images_list.append(images_cityscape(data, image_num, img_file))
            if ds_type == 'labelme':
                for shapes in data['shapes']:
                    object_num = object_num + 1
                    label = shapes['label']
                    # First time we see a label: register its category.
                    if label not in labels_list:
                        categories_list.append(categories(label, labels_list))
                        labels_list.append(label)
                        label_to_num[label] = len(labels_list)
                    p_type = shapes['shape_type']
                    if p_type == 'polygon':
                        points = shapes['points']
                        annotations_list.append(
                            annotations_polygon(data['imageHeight'], data[
                                'imageWidth'], points, label, image_num,
                                                object_num, label_to_num))

                    if p_type == 'rectangle':
                        # Normalize the two corners, then expand to 4 points.
                        (x1, y1), (x2, y2) = shapes['points']
                        x1, x2 = sorted([x1, x2])
                        y1, y2 = sorted([y1, y2])
                        points = [[x1, y1], [x2, y2], [x1, y2], [x2, y1]]
                        annotations_list.append(
                            annotations_rectangle(points, label, image_num,
                                                  object_num, label_to_num))
            elif ds_type == 'cityscape':
                for shapes in data['objects']:
                    object_num = object_num + 1
                    label = shapes['label']
                    if label not in labels_list:
                        categories_list.append(categories(label, labels_list))
                        labels_list.append(label)
                        label_to_num[label] = len(labels_list)
                    points = shapes['polygon']
                    annotations_list.append(
                        annotations_polygon(data['imgHeight'], data[
                            'imgWidth'], points, label, image_num, object_num,
                                            label_to_num))
    data_coco['images'] = images_list
    data_coco['categories'] = categories_list
    data_coco['annotations'] = annotations_list
    return data_coco


def voc_get_label_anno(ann_dir_path, ann_ids_path, labels_path):
    """Read the VOC label list and annotation-id list.

    Args:
        ann_dir_path: directory containing the xml annotation files.
        ann_ids_path: file of whitespace-separated annotation ids.
        labels_path: file of whitespace-separated category names.

    Returns:
        (label -> 1-based id dict, list of xml annotation paths).
    """
    with open(labels_path, 'r') as f:
        names = f.read().split()
    label2id = {name: idx for idx, name in enumerate(names, start=1)}

    with open(ann_ids_path, 'r') as f:
        ids = f.read().split()
    # An id may or may not already carry the .xml suffix.
    xml_paths = [
        os.path.join(ann_dir_path, aid if aid.endswith('xml') else aid + '.xml')
        for aid in ids
    ]
    return label2id, xml_paths


def voc_get_image_info(annotation_root, im_id):
    """Build the COCO 'images' entry from a parsed VOC annotation tree.

    Args:
        annotation_root: root Element of the VOC xml.
        im_id: image id to assign.
    """
    filename = annotation_root.findtext('filename')
    assert filename is not None
    img_name = os.path.basename(filename)  # kept as original (unused below)

    size_node = annotation_root.find('size')
    width = int(size_node.findtext('width'))
    height = int(size_node.findtext('height'))

    return {
        'file_name': filename,
        'height': height,
        'width': width,
        'id': im_id,
    }


def voc_get_coco_annotation(obj, label2id):
    """Convert one VOC <object> node into a COCO annotation dict.

    VOC boxes are 1-based inclusive; COCO wants 0-based x/y plus
    width/height, so xmin/ymin are shifted down by one.
    """
    label = obj.findtext('name')
    assert label in label2id, "label is not in label2id."
    box = obj.find('bndbox')
    x0 = int(box.findtext('xmin')) - 1
    y0 = int(box.findtext('ymin')) - 1
    x1 = int(box.findtext('xmax'))
    y1 = int(box.findtext('ymax'))
    assert x1 > x0 and y1 > y0, "Box size error."
    box_w, box_h = x1 - x0, y1 - y0
    return {
        'area': box_w * box_h,
        'iscrowd': 0,
        'bbox': [x0, y0, box_w, box_h],
        'category_id': label2id[label],
        'ignore': 0,
        'segmentation': []  # This script is not for segmentation
    }


def voc_xmls_to_cocojson(annotation_paths, label2id, output_dir, output_file):
    """Convert a list of VOC xml files into one COCO json file on disk.

    Args:
        annotation_paths: xml file paths to convert.
        label2id: label -> category-id mapping.
        output_dir: directory for the output json.
        output_file: output json file name.
    """
    coco = {
        "images": [],
        "type": "instances",
        "annotations": [],
        "categories": []
    }
    next_box_id = 1  # bounding box start id
    next_img_id = 0
    print('Start converting !')
    for xml_path in tqdm(annotation_paths):
        # Parse one annotation xml and register its image.
        root = ET.parse(xml_path).getroot()
        img_info = voc_get_image_info(root, next_img_id)
        next_img_id += 1
        coco['images'].append(img_info)

        # One COCO annotation per <object> node.
        for obj in root.findall('object'):
            ann = voc_get_coco_annotation(obj=obj, label2id=label2id)
            ann.update({'image_id': img_info['id'], 'id': next_box_id})
            coco['annotations'].append(ann)
            next_box_id += 1

    for label, label_id in label2id.items():
        coco['categories'].append(
            {'supercategory': 'none', 'id': label_id, 'name': label})

    with open(os.path.join(output_dir, output_file), 'w') as f:
        f.write(json.dumps(coco))


def main():
    """CLI entry point: parse arguments and dispatch to the voc / via /
    labelme / cityscape conversion pipelines.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--dataset_type', help='the type of dataset')
    parser.add_argument('--json_input_dir', help='input annotated directory')
    parser.add_argument('--image_input_dir', help='image directory')
    parser.add_argument(
        '--output_dir', help='output dataset directory', default='./')
    parser.add_argument(
        '--train_proportion',
        help='the proportion of train dataset',
        type=float,
        default=1.0)
    parser.add_argument(
        '--val_proportion',
        help='the proportion of validation dataset',
        type=float,
        default=0.0)
    parser.add_argument(
        '--test_proportion',
        help='the proportion of test dataset',
        type=float,
        default=0.0)
    parser.add_argument(
        '--voc_anno_dir',
        help='In Voc format dataset, path to annotation files directory.',
        type=str,
        default=None)
    parser.add_argument(
        '--voc_anno_list',
        help='In Voc format dataset, path to annotation files ids list.',
        type=str,
        default=None)
    parser.add_argument(
        '--voc_label_list',
        help='In Voc format dataset, path to label list. The content of each line is a category.',
        type=str,
        default=None)
    parser.add_argument(
        '--via_name',
        type=str,
        default='via_region_data.json',
        help='via_region_data json file name')
    parser.add_argument(
        '--dataset_name',
        type=str,
        default='',
        help='dataset name')
    parser.add_argument(
        '--voc_out_name',
        type=str,
        default='voc.json',
        help='In Voc format dataset, path to output json file')
    args = parser.parse_args()
    try:
        assert args.dataset_type in ['via', 'voc', 'labelme', 'cityscape']
    except AssertionError as e:
        print(
            'Now only support the voc, cityscape dataset and labelme dataset!!')
        os._exit(0)

    if args.dataset_type == 'voc':
        # VOC flow: read label/ids lists, then convert the xmls to one json.
        assert args.voc_anno_dir and args.voc_anno_list and args.voc_label_list
        label2id, ann_paths = voc_get_label_anno(
            args.voc_anno_dir, args.voc_anno_list, args.voc_label_list)
        voc_xmls_to_cocojson(
            annotation_paths=ann_paths,
            label2id=label2id,
            output_dir=args.output_dir,
            output_file=args.voc_out_name)
    elif args.dataset_type == 'via':
        # VIA flow: merge all via_region_data files under image_input_dir,
        # then split images and annotations into train/test/val on disk.
        data_dir = args.image_input_dir
        via_name = args.via_name
        dataset_name = args.dataset_name
        train_data_coco = deal_via(data_dir,
                                    dataset_name=dataset_name, 
                                    via_name=via_name)
        # use everything for training
        # if abs(args.train_proportion - 1.0) < 1e-4:
        #     train_json_path = osp.join(data_dir,'instance_train.json')
        #     json.dump(
        #         train_data_coco,
        #         open(train_json_path, 'w'),
        #         indent=4,
        #         cls=MyEncoder)
        # split into train / test / val
        # else:
        split_train_test(args, train_data_coco, data_dir)

    else:
        # labelme / cityscape flow: validate inputs first.
        try:
            assert os.path.exists(args.json_input_dir)
        except AssertionError as e:
            print('The json folder does not exist!')
            os._exit(0)
        try:
            assert os.path.exists(args.image_input_dir)
        except AssertionError as e:
            print('The image folder does not exist!')
            os._exit(0)
        try:
            assert abs(args.train_proportion + args.val_proportion \
                    + args.test_proportion - 1.0) < 1e-5
        except AssertionError as e:
            print(
                'The sum of pqoportion of training, validation and test datase must be 1!'
            )
            os._exit(0)

        # Allocate the dataset.
        total_num = len(glob.glob(osp.join(args.json_input_dir, '*.json')))
        if args.train_proportion != 0:
            train_num = int(total_num * args.train_proportion)
            os.makedirs(args.output_dir + '/train')
        else:
            train_num = 0
        if args.val_proportion == 0.0:
            val_num = 0
            test_num = total_num - train_num
            if args.test_proportion != 0.0:
                os.makedirs(args.output_dir + '/test')
        else:
            val_num = int(total_num * args.val_proportion)
            test_num = total_num - train_num - val_num
            os.makedirs(args.output_dir + '/val')
            if args.test_proportion != 0.0:
                os.makedirs(args.output_dir + '/test')
        # Copy each image into the split directory its position falls into.
        # NOTE(review): os.listdir order is arbitrary, so this split is not
        # deterministic across runs.
        count = 1
        for img_name in os.listdir(args.image_input_dir):
            if count <= train_num:
                if osp.exists(args.output_dir + '/train/'):
                    shutil.copyfile(
                        osp.join(args.image_input_dir, img_name),
                        osp.join(args.output_dir + '/train/', img_name))
            else:
                if count <= train_num + val_num:
                    if osp.exists(args.output_dir + '/val/'):
                        shutil.copyfile(
                            osp.join(args.image_input_dir, img_name),
                            osp.join(args.output_dir + '/val/', img_name))
                else:
                    if osp.exists(args.output_dir + '/test/'):
                        shutil.copyfile(
                            osp.join(args.image_input_dir, img_name),
                            osp.join(args.output_dir + '/test/', img_name))
            count = count + 1

        # Deal with the json files.
        if not os.path.exists(args.output_dir + '/annotations'):
            os.makedirs(args.output_dir + '/annotations')
        if args.train_proportion != 0:
            train_data_coco = deal_json(args.dataset_type,
                                        args.output_dir + '/train',
                                        args.json_input_dir)
            train_json_path = osp.join(args.output_dir + '/annotations',
                                       'instance_train.json')
            json.dump(
                train_data_coco,
                open(train_json_path, 'w'),
                indent=4,
                cls=MyEncoder)
        if args.val_proportion != 0:
            val_data_coco = deal_json(args.dataset_type,
                                      args.output_dir + '/val',
                                      args.json_input_dir)
            val_json_path = osp.join(args.output_dir + '/annotations',
                                     'instance_val.json')
            json.dump(
                val_data_coco,
                open(val_json_path, 'w'),
                indent=4,
                cls=MyEncoder)
        if args.test_proportion != 0:
            test_data_coco = deal_json(args.dataset_type,
                                       args.output_dir + '/test',
                                       args.json_input_dir)
            test_json_path = osp.join(args.output_dir + '/annotations',
                                      'instance_test.json')
            json.dump(
                test_data_coco,
                open(test_json_path, 'w'),
                indent=4,
                cls=MyEncoder)

def split_train_test(args, train_data_coco, data_dir):
    """Split the merged VIA COCO dict into train/test/val subsets, copy the
    corresponding images on disk (applying blur masks where requested) and
    write one instance_*.json per subset into ``data_dir``.

    Args:
        args: parsed CLI args; train/val/test proportions must sum to 1.
        train_data_coco: merged dict from deal_via ('images', 'categories',
            'annotations', 'blur_dict').
        data_dir: dataset root containing the 'images' sub-directory.
    """
    # the three proportions must sum to 1
    assert abs(args.train_proportion + args.val_proportion \
            + args.test_proportion - 1.0) < 1e-5, 'The sum of pqoportion of training, validation and test datase must be 1!'

    # shuffle image order in place
    total_images = train_data_coco["images"]
    np.random.shuffle(total_images)
    np.random.shuffle(total_images)
    image_id_dict = dict()
    for img in total_images:
        image_id_dict[img["id"]] = img

    total_num = len(total_images)
    train_num = int(total_num * args.train_proportion)
    if args.val_proportion == 0.0:
        val_num = 0
        test_num = total_num - train_num
    else:
        val_num = int(total_num * args.val_proportion)
        test_num = total_num - train_num - val_num

    # assign images: train first, then test, remainder goes to val
    train_coco = {"images": total_images[: train_num], "categories": train_data_coco["categories"], "annotations": []}
    test_coco = {"images": total_images[train_num: train_num + test_num], "categories": train_data_coco["categories"], "annotations": []}
    val_coco = {"images": total_images[train_num + test_num:], "categories": train_data_coco["categories"], "annotations": []}
    blur_dict = train_data_coco["blur_dict"]

    train_img_id = set([img["id"] for img in train_coco["images"]])
    test_img_id = set([img["id"] for img in test_coco["images"]])
    val_img_id = set([img["id"] for img in val_coco["images"]])

    def copy_img(src_dir, sub_name, suffix, blur=None):
        """Copy one image into its subset directory.

        Args:
            src_dir: dataset root directory.
            sub_name: subset directory name ('train' / 'test' / 'val').
            suffix: image path relative to src_dir (starts with 'images/').
            blur: optional polygon; when given, that region is filled with
                the image mean colour before saving.
        """
        src_path = src_dir + os.sep + suffix
        # NOTE(review): the '/images/' replacement assumes POSIX separators.
        dst_path = src_path.replace("/images/", "/{}/".format(sub_name) )
        dst_dir = osp.dirname(dst_path)
        if not osp.exists(dst_dir):
            os.makedirs(dst_dir, exist_ok=True)
        if not blur:
            shutil.copy(src_path, dst_path)
        else:
            img = cv2.imread(src_path)
            # Bug fix: np.int was removed in NumPy 1.24 — use builtin int.
            poly = np.array(blur, dtype=int).reshape([-1, 2])
            mask = np.zeros(img.shape[:2], dtype=np.uint8)
            mean = np.mean(np.mean(img, axis=0), axis=0).astype(np.uint8)
            fg = np.zeros(img.shape, dtype=np.uint8) + mean
            mask = cv2.fillConvexPoly(mask, poly, [255])
            cv2.copyTo(fg, mask, img)
            cv2.imwrite(dst_path, img)

    # route each annotation to its subset; copy the image on first sight
    record_id = set()
    for bbox in train_data_coco["annotations"]:
        img_id = bbox["image_id"]
        if img_id in train_img_id:
            train_coco["annotations"].append(bbox)
            if img_id in record_id: continue
            copy_img(data_dir, "train", image_id_dict[img_id]["file_name"], blur_dict.get(img_id, None))
            record_id.add(img_id)
        elif img_id in test_img_id:
            test_coco["annotations"].append(bbox)
            if img_id in record_id: continue
            copy_img(data_dir, "test", image_id_dict[img_id]["file_name"], blur_dict.get(img_id, None))
            record_id.add(img_id)
        else:
            val_coco["annotations"].append(bbox)
            if img_id in record_id: continue
            copy_img(data_dir, "val", image_id_dict[img_id]["file_name"], blur_dict.get(img_id, None))
            record_id.add(img_id)

    def copy_empty_img(sub_name, cur_id_set, record_id):
        # Images without any annotation still need to be copied over.
        for cur_id in cur_id_set:
            if cur_id in record_id: continue
            copy_img(data_dir, sub_name, image_id_dict[cur_id]["file_name"], blur_dict.get(cur_id, None))

    copy_empty_img("train", train_img_id, record_id)
    copy_empty_img("test", test_img_id, record_id)
    copy_empty_img("val", val_img_id, record_id)


    # write the per-subset json files
    dir_names = ["train", "test", "val"]
    json_names = ["instance_train.json", "instance_test.json", "instance_val.json"]
    coco = [train_coco, test_coco, val_coco]
    for idx, coco_data in enumerate(coco):
        data_str = json.dumps(coco_data, indent=4, cls=MyEncoder)
        # rewrite image paths from images/ to the subset directory
        data_str = data_str.replace("images/", "{}/".format(dir_names[idx]))
        with open(data_dir + os.sep + json_names[idx], "w") as wf:
            wf.write(data_str)


# Script entry point.
if __name__ == '__main__':
    main()



# Usage examples (kept verbatim below; this bare string literal is a no-op
# at import time and serves only as documentation).
'''

python tools/via2coco.py \
    --train_proportion=0.99  \
    --val_proportion=0.0   \
    --test_proportion=0.01  \
    --dataset_name=car \
    --dataset_type=via \
    --via_name=via_region_data.merge.json \
    --image_input_dir=/home/xc/work/code/paddle/train_data/det/car


python tools/via2coco.py \
    --train_proportion=0.99  \
    --val_proportion=0.0   \
    --test_proportion=0.01  \
    --dataset_name=ringelman \
    --dataset_type=via \
    --via_name=via_region_data.json \
    --image_input_dir=/home/xc/work/code/paddle/train_data/det/ringelman

# fire smoke
python tools/via2coco.py \
    --train_proportion=0.97  \
    --val_proportion=0.0   \
    --test_proportion=0.03  \
    --dataset_name=fire \
    --dataset_type=via \
    --via_name=via_region_data.json \
    --image_input_dir=/home/xc/work/code/paddle/train_data/det/fire


1类 car
python tools/via2coco.py \
    --train_proportion=0.99  \
    --val_proportion=0.0   \
    --test_proportion=0.01  \
    --dataset_name=car3 \
    --dataset_type=via \
    --via_name=via_region_data.merge.json \
    --image_input_dir=/home/xc/work/code/paddle/train_data/det/car

1类 pedestrian
python tools/via2coco.py \
    --train_proportion=0.97  \
    --val_proportion=0.0   \
    --test_proportion=0.03  \
    --dataset_name=pedestrian \
    --dataset_type=via \
    --via_name=via_region_data.json \
    --image_input_dir=/home/xc/work/code/paddle/train_data/det/pedestrian

    
    python tools/via2coco.py \
    --train_proportion=0.9  \
    --val_proportion=0.05   \
    --test_proportion=0.05  \
    --dataset_name=road_vihicle \
    --dataset_type=via \
    --via_name=via_region_data.json \
    --image_input_dir=/home/xc/work/dataset/vihicle

    
'''