# ├── coco2017
#        ├── train2017所有训练图像文件夹(118287张)
#        ├── val2017 所有验证图像文件夹(5000张)
#        ├── annotations:标注文件夹
#                   ├── instances_train2017.json: 对应目标检测、分割任务的训练集标注文件
#                   ├── instances_val2017.json: 对应目标检测、分割任务的验证集标注文件
#                   ├── captions_train2017.json: 对应图像描述的训练集标注文件
#                   ├── captions_val2017.json: 对应图像描述的验证集标注文件
#                   ├── person_keypoints_train2017.json: 对应人体关键点检测的训练集标注文件
#                   ├── person_keypoints_val2017.json: 对应人体关键点检测的验证集标注文件

# 标准的voc数据集加载类
# VOCdevkit
#     |
#     |---VOC2007
#            |
#            |---Annotations
#                    |
#                    |---000005.xml
#                    |---...
#            |---ImageSets
#                    |
#                    |---Main
#                         |
#                         |---train.txt
#                         |---val.txt
#            |---JPEGImages
#                    |
#                    |---000005.jpg
#            |---SegmentationClass
#            |---SegmentationObject

# image_objs是字典列表，字典的内容为：
# {
#     'image_info': {
#         'filename': filename,           //在数据集中的相对位置
#         'width': width,
#         'height': height,
#         'depth': depth
#     },
#     'objects': [
#         [cls, xmin, ymin, xmax, ymax],
#         [cls, xmin, ymin, xmax, ymax],
#         ...,
#     ]
# }

import os
import random
from pathlib import Path
import shutil
import json
import cv2
import numpy as np
import xml.etree.ElementTree as ET
# from dataset.dataloader import CocoDataset, SimpleDetDataset
from tqdm import tqdm

# Mapping {category_name: category_id} written into the COCO "categories" list.
PRE_DEFINE_CATEGORIES = {}
# Id assigned to the first annotation; annotations are numbered sequentially.
START_BOUNDING_BOX_ID = 0


# image_objs = [{'image_info':{'filename': image_name, 'width': w, 'height': h, 'depth': depth}, 'objects':[[cls, xmin, ymin, xmax, ymax], [cls, xmin, ymin, xmax, ymax]]} ...]

# Note: if images are stored under several directories, only the base file name
# is kept here, so converting to COCO can produce colliding file names.
def conver2coco(images_objs, json_file):
    """Convert the intermediate ``images_objs`` list into a COCO-style JSON file.

    images_objs: list of dicts with 'image_info' and 'objects' (see comment above);
                 each object is [category_id, xmin, ymin, xmax, ymax].
    json_file:   output path for the generated annotation JSON.
    """
    json_dict = {"images": [],
                 "type": "instances",
                 "annotations": [],
                 "categories": []}
    categories = PRE_DEFINE_CATEGORIES
    bnd_id = START_BOUNDING_BOX_ID
    for image_id, file_dict in enumerate(images_objs):
        info = file_dict['image_info']
        image = {'file_name': Path(info['filename']).name,
                 'height': info['height'],
                 'width': info['width'],
                 'id': image_id}
        json_dict['images'].append(image)
        for obj in file_dict['objects']:
            o_width = abs(obj[3] - obj[1])
            o_height = abs(obj[4] - obj[2])
            annotation = {
                'area': o_width * o_height,
                'iscrowd': 0,
                # BUG FIX: 'image_id' previously stored obj[0] (the category id);
                # it must reference the id of the enclosing image.
                'image_id': image_id,
                # COCO bbox format is [x, y, width, height].
                'bbox': [obj[1], obj[2], o_width, o_height],
                'category_id': obj[0],
                'id': bnd_id,
                'ignore': 0,
            }
            json_dict['annotations'].append(annotation)
            bnd_id += 1
    for cate, cid in categories.items():
        json_dict['categories'].append({'supercategory': 'none', 'id': cid, 'name': cate})
    with open(json_file, 'w') as w:
        w.write(json.dumps(json_dict, indent=1))


def conver2simpledet(images_objs, txt_file):
    """Write ``images_objs`` as a SimpleDet-style annotation txt.

    Each output line is: "<filename> c,x1,y1,x2,y2 c,x1,y1,x2,y2 ...".
    Images with no objects are skipped entirely.
    Also writes 'classes.txt' (one class id per line, sorted) next to txt_file.
    """
    classes = set()
    with open(txt_file, 'w') as f:
        for image in images_objs:
            boxes = image['objects']
            if not boxes:
                # Keep original behavior: images without objects are not written.
                continue
            line = image['image_info']['filename']
            for box in boxes:
                classes.add(int(box[0]))
                line += ' {},{},{},{},{}'.format(int(box[0]), int(box[1]), int(box[2]), int(box[3]), int(box[4]))
            f.write(line + '\n')

    # BUG FIX: class ids were previously concatenated with no separator
    # (e.g. classes 0 and 2 produced the ambiguous file content "02").
    with open(Path(txt_file).parent.joinpath('classes.txt'), 'w') as f:
        for n_cls in sorted(classes):
            f.write(str(n_cls) + '\n')


def parse_per_xml(xml_name, image_dir='JPEGImages'):
    """Parse one VOC annotation XML file.

    Returns (info_dict, objects):
      info_dict - {'filename', 'width', 'height', 'depth'}, where filename is
                  image_dir joined with the image's base name,
      objects   - list of [class_name, xmin, ymin, xmax, ymax] (ints for coords).
    """
    with open(xml_name, "r", encoding='UTF-8') as xml_file:
        root = ET.parse(xml_file).getroot()
        image_name = root.find('filename').text
        size_node = root.find('size')
        info_dict = {
            'filename': os.path.join(image_dir, Path(image_name).name),
            'width': int(size_node.find('width').text),
            'height': int(size_node.find('height').text),
            'depth': int(size_node.find('depth').text),
        }
        objects = []
        for obj_node in root.iter('object'):
            box_node = obj_node.find('bndbox')
            coords = [int(box_node.find(tag).text)
                      for tag in ('xmin', 'ymin', 'xmax', 'ymax')]
            objects.append([obj_node.find('name').text] + coords)
        return info_dict, objects


# 解析simpledataset数据集文件夹
def parse_simpledataset(dataset_name, annot_file='train.txt'):
    """Parse a SimpleDet-style dataset folder into the image_objs list.

    Each line of annot_file: "<relative/image/path> c,x1,y1,x2,y2 ...".
    Image dimensions are read from the image itself via cv2.
    Returns the image_objs list, or None when annot_file does not exist.
    """
    annot_path = os.path.join(dataset_name, annot_file)
    if not os.path.exists(annot_path):
        print('{} does not exists'.format(annot_path))
        return
    with open(annot_path, 'r') as f:
        annot_lines = [l.strip() for l in f.readlines()]
    image_objs = []
    par = tqdm(annot_lines)
    for line in par:
        parts = line.split(' ')
        image_path = os.path.join(dataset_name, parts[0])
        if not os.path.exists(image_path):
            print('{} does not exists'.format(image_path))
            continue
        img = cv2.imread(image_path)
        if img is None:
            # BUG FIX: cv2.imread returns None for unreadable/corrupt files;
            # previously this crashed on img.shape.
            print('{} cannot be read'.format(image_path))
            continue
        image_info = {'filename': parts[0], 'width': img.shape[1], 'height': img.shape[0],
                      'depth': img.shape[2]}
        # Boxes are "c,x1,y1,x2,y2"; negative values are clamped to 0.
        objects = [[max(0, int(x)) for x in box.split(',')] for box in parts[1:]]
        image_objs.append({'image_info': image_info, 'objects': objects})
    par.close()
    return image_objs


# 传入例子，比如VOCdevkit
def parse_voc(dataset_name, setname='VOC2007', trainval='train.txt'):
    f = open(os.path.join(dataset_name, setname, 'ImageSets', 'Main', trainval))
    names = [l.strip() for l in f.readlines()]
    f.close()
    image_objs = []
    for name in names:
        annot_lowwer_suffix = os.path.join(dataset_name, setname, 'Annotations', name + '.xml')
        annot_upper_suffix = os.path.join(dataset_name, setname, 'Annotations', name + '.XML')
        annot_file = ''
        if os.path.exists(annot_lowwer_suffix):
            annot_file = annot_lowwer_suffix
        elif os.path.exists(annot_upper_suffix):
            annot_file = annot_upper_suffix
        else:
            print('{} or {} do not exists'.format(annot_lowwer_suffix, annot_upper_suffix))
            continue
        info_dict, objs = parse_per_xml(annot_file, os.path.join(setname, 'JPEGImages'))
        image_path = os.path.join(dataset_name, info_dict['filename'])
        if not os.path.exists(image_path):
            print('image {} does not exists'.format(image_path))
            continue
        image_objs.append({'image_info': info_dict, 'objects': objs})
    return image_objs


def parse_coco(dataset_name, annot_file='istances_train2017.json', image_dir='train2017'):
    """Parse a COCO-style annotation file into the image_objs list.

    dataset_name: dataset root; the JSON is read from <root>/Annotations/<annot_file>.
    annot_file:   annotation JSON name. NOTE: the default keeps the original
                  (misspelled) value for backward compatibility; the standard
                  COCO name is 'instances_train2017.json'.
    image_dir:    directory prefix stored in each entry's 'filename'.
    Returns the image_objs list, or None when the annotation file is missing.
    """
    annot_path = os.path.join(dataset_name, 'Annotations', annot_file)
    if not os.path.exists(annot_path):
        print('{} does not exists'.format(annot_path))
        return
    with open(annot_path, 'r') as f:
        json_data = json.load(f)
    image_objs_dict = {}
    print('Getting image list')
    par = tqdm(json_data['images'])
    for img in par:
        image_objs_dict[img['id']] = {'image_info':
                                          {'filename': os.path.join(image_dir, img['file_name']),
                                           'width': img['width'],
                                           'height': img['height'],
                                           # assumes 3-channel images — TODO confirm
                                           'depth': 3},
                                      'objects': []}
    par.close()
    print('Parsing annotations')
    par = tqdm(json_data['annotations'])
    for annot in par:
        # BUG FIX: an annotation referencing an image id absent from 'images'
        # previously raised KeyError; skip it with a warning instead.
        if annot['image_id'] not in image_objs_dict:
            print('annotation references unknown image id {}'.format(annot['image_id']))
            continue
        # Convert COCO [x, y, w, h] to [xmin, ymin, xmax, ymax], clamped to >= 0.
        x, y, w, h = annot['bbox'][0], annot['bbox'][1], annot['bbox'][2], annot['bbox'][3]
        bbox = [max(0, v) for v in (x, y, x + w, y + h)]
        bbox.insert(0, annot['category_id'])
        image_objs_dict[annot['image_id']]['objects'].append(bbox)
    par.close()
    return list(image_objs_dict.values())


def check_single_obj(file_dict, dataset_path):
    """Draw every bounding box of one image and save the overlay under ./check/.

    file_dict:    {'image_info': {...}, 'objects': [[cls, xmin, ymin, xmax, ymax], ...]}
    dataset_path: root folder the relative 'filename' is resolved against.
    """
    info = file_dict['image_info']
    file_name = os.path.join(dataset_path, info['filename'])
    if not os.path.exists(file_name):
        print(file_name + ' not exists')
        return
    # cv2.namedWindow(info['filename'], 0)
    canvas = cv2.imread(os.path.join(dataset_path, info['filename']))
    for cls_id, xmin, ymin, xmax, ymax in file_dict['objects']:
        # if cls_id == 9:
        #     continue
        canvas = cv2.rectangle(canvas, (xmin, ymin), (xmax, ymax), (255, 0, 0), 1)
        canvas = cv2.putText(canvas, str(cls_id), (xmin + 10, ymin - 10),
                             cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255))
    cv2.imwrite(os.path.join('check', Path(info['filename']).name), canvas)
    # cv2.imshow(info['filename'], canvas)
    # cv2.waitKey()

def check_objs(images_objs, dataset_path):
    """Interactively inspect annotations: repeatedly read an index from stdin,
    print that entry and render it via check_single_obj.

    Entering an out-of-range index exits the loop.
    """
    index = 0
    while 0 <= index < len(images_objs):
        print('input index to check image object')
        index = int(input())
        # BUG FIX: validate the freshly read index. Previously the bound was only
        # checked on the stale value, so a too-large index raised IndexError and
        # a negative one silently wrapped to the end of the list.
        if not 0 <= index < len(images_objs):
            break
        file_dict = images_objs[index]
        print(file_dict)
        check_single_obj(file_dict, dataset_path)


def check_all_objs(images_objs, dataset_path):
    """Render and save a bbox overlay for every entry in images_objs."""
    progress = tqdm(images_objs)
    for entry in progress:
        check_single_obj(entry, dataset_path)
    progress.close()


if __name__ == '__main__':
    # Load the merged COCO-style dataset and convert it to SimpleDet format.
    dataset_path = os.path.join('aisafety', 'merge')
    set_name = 'trainval'
    image_objs = parse_coco(dataset_path, set_name + '.json', 'Images')
    print('get objs len {}'.format(len(image_objs)))

    # To visually inspect the parsed annotations, recreate the 'check'
    # folder and render every image:
    # if os.path.exists('check'):
    #     shutil.rmtree('check')
    # os.mkdir('check')
    # check_all_objs(image_objs, dataset_path)

    # Convert to SimpleDet txt format:
    conver2simpledet(image_objs, os.path.join(dataset_path, 'trainval.txt'))