import os
import random
import xml.etree.ElementTree as ET
import argparse

import numpy as np

from utils.dataset import get_classes

"""
划分train,val,test
将VOC格式转为其它格式

"""
def set_dataset_configuration(argv=None):
    """Build and parse the command-line configuration for dataset splitting.

    Args:
        argv: Optional list of argument strings. Defaults to None, which
            makes argparse read sys.argv[1:] (the extra parameter is
            backward-compatible and allows testing without the real CLI).

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser()
    # dataset configuration
    parser.add_argument('--dataset', default='NWPUv2', type=str, help='Dataset:NWPUv1, NWPUv2, DIOR')
    parser.add_argument('--dataset_path', type=str, default='/workspace/Dataset/NWPUv2',help='abspath of dataset')
    parser.add_argument('--img_path', type=str, default='JPEGImages')
    parser.add_argument('--ann_path', type=str, default='Annotations/Horizontal Bounding Boxes')
    #--------------------------------------------------------------------------------------------------------------------------------#
    #   args.mode selects what the script computes when run:
    #   mode 0: the whole pipeline — both the ImageSets/Main txt split files and the per-split training list files
    #   mode 1: only the txt split files inside ImageSets/Main
    #   mode 2: only the per-split training list files (e.g. trainval.txt / train.txt / val.txt / test.txt with boxes)
    #--------------------------------------------------------------------------------------------------------------------------------
    parser.add_argument('--mode', type=int, default=2)
    parser.add_argument('--seed', type=int, default=2023)
    #--------------------------------------------------------------------------------------------------------------------------------#
    #   trainval_percent: ratio of (train+val) to the whole dataset; default (train+val):test = 9:1
    #   train_percent:    ratio of train inside (train+val);         default train:val = 9:1
    #   => 81% train, 9% val, 10% test
    #   Only used when args.mode is 0 or 1.
    #--------------------------------------------------------------------------------------------------------------------------------#
    # BUGFIX: these are fractional ratios, so they must be parsed as float.
    # The original declared type=int, which raises ValueError for any value
    # actually passed on the command line (e.g. "--trainval_percent 0.9").
    parser.add_argument('--trainval_percent', type=float, default=0.9,help='trainval:test in dataset')
    parser.add_argument('--train_percent', type=float, default=0.9,help='train:val in trainval set')

    return parser.parse_args(argv)

def convert_annotation(image_id, list_file):
    """Append the ground-truth boxes of one image to an open list file.

    Reads the VOC XML annotation for ``image_id`` (path built from the
    module-level ``args``) and writes every non-difficult object whose class
    appears in the module-level ``classes`` as " xmin,ymin,xmax,ymax,cls_id"
    to ``list_file``. Also increments the per-class counters in the
    module-level ``nums`` array.

    Args:
        image_id: Image identifier (XML file stem, no extension).
        list_file: An open, writable text file object.
    """
    xml_path = os.path.join(args.dataset_path, args.ann_path, '%s.xml'%(image_id))
    # BUGFIX: use a context manager so the XML file handle is always closed
    # (the original opened it and never closed it).
    with open(xml_path, encoding='utf-8') as in_file:
        tree = ET.parse(in_file)
    root = tree.getroot()

    for obj in root.iter('object'):
        difficult = 0
        # The 'difficult' tag is optional in VOC annotations.
        if obj.find('difficult') is not None:
            difficult = obj.find('difficult').text
        cls = obj.find('name').text
        # Skip classes not in the class list and objects flagged difficult.
        if cls not in classes or int(difficult)==1:
            continue
        cls_id = classes.index(cls)
        xmlbox = obj.find('bndbox')
        b = (int(float(xmlbox.find('xmin').text)), int(float(xmlbox.find('ymin').text)),
             int(float(xmlbox.find('xmax').text)), int(float(xmlbox.find('ymax').text)))
        list_file.write(" " + ",".join([str(a) for a in b]) + ',' + str(cls_id))

        # Reuse cls_id instead of recomputing classes.index(cls) twice.
        nums[cls_id] = nums[cls_id] + 1
        
if __name__ == "__main__":

    args = set_dataset_configuration()

    if args.dataset == 'NWPUv1':
        dataset_path = 'dataset/NWPUv1'
        classes_path ='model_data/nwpuv1/nwpuv1_classes.txt'
        img_path = 'JPEGImages'
        ann_path = 'Annotations'

    elif args.dataset == 'NWPUv2':
        dataset_path = 'dataset/NWPUv2'
        classes_path ='model_data/nwpuv2/nwpuv2_classes.txt'
        img_path = 'JPEGImages'
        ann_path = 'Annotations'

    elif args.dataset == 'DIOR':
        dataset_path = 'dataset/DIOR'
        classes_path = 'model_data/dior/dior_classes.txt'
        img_path = 'JPEGImages'
        ann_path = 'Annotations/Horizontal Bounding Boxes'

    else:
        print("Error: unrecognized dataset")
    
    random.seed(args.seed)

    classes, _      = get_classes(classes_path)

    #-------------------------------------------------------#
    #   统计目标数量
    #-------------------------------------------------------#
    VOCdevkit_sets  = ['trainval', 'train','val','test']
    photo_nums  = np.zeros(len(VOCdevkit_sets))
    nums        = np.zeros(len(classes))

    if args.mode == 0 or args.mode == 1:
        # Mode 0/1: randomly partition annotated images into trainval/train/
        # val/test id lists under ImageSets/Main.
        print("Generate trainval, train, val and test.txt in ImageSets/Main.")
        xmlfilepath     = os.path.join(args.dataset_path, args.ann_path)
        saveBasePath    = os.path.join(args.dataset_path, 'ImageSets/Main')
        # Every .xml annotation file stem is one image id.
        total_xml = [name for name in os.listdir(xmlfilepath) if name.endswith(".xml")]

        # Renamed from `list` (shadowed the builtin) to `indices`.
        num      = len(total_xml)
        indices  = range(num)
        tv       = int(num*args.trainval_percent)   # size of trainval
        tr       = int(tv*args.train_percent)       # size of train inside trainval
        # Sampling order is unchanged so a given seed produces the same split
        # as before; the sets below give O(1) membership tests in the write
        # loop (the original did O(n) list lookups per image).
        trainval = random.sample(indices, tv)
        train    = random.sample(trainval, tr)
        trainval_set = set(trainval)
        train_set    = set(train)

        print("total labeled images:",num)
        print("trainval :",tv)
        print("train :",tr)
        print("val :",tv-tr)
        print("test :",num-tv)

        # BUGFIX: context managers guarantee the four files are closed even
        # if a write fails part-way through.
        with open(os.path.join(saveBasePath,'trainval.txt'), 'w') as ftrainval, \
             open(os.path.join(saveBasePath,'test.txt'), 'w') as ftest, \
             open(os.path.join(saveBasePath,'train.txt'), 'w') as ftrain, \
             open(os.path.join(saveBasePath,'val.txt'), 'w') as fval:
            for i in indices:
                name = total_xml[i][:-4]+'\n'
                if i in trainval_set:
                    ftrainval.write(name)
                    if i in train_set:
                        ftrain.write(name)
                    else:
                        fval.write(name)
                else:
                    ftest.write(name)
        print("Generate trainval, train, val and test.txt in ImageSets done.")

    if args.mode == 0 or args.mode == 2:
        # Mode 0/2: for each split, emit a "<abs jpg path> <boxes...>" line
        # per image into <split>.txt in the working directory.
        print("Generate trainval, train, val and test.txt for train.")
        for type_index, image_set in enumerate(VOCdevkit_sets):
            # BUGFIX: close the id file instead of leaking the handle.
            with open(os.path.join(args.dataset_path, 'ImageSets/Main/%s.txt'%(image_set)), encoding='utf-8') as f:
                image_ids = f.read().strip().split()
            with open('%s.txt'%(image_set), 'w', encoding='utf-8') as list_file:
                for image_id in image_ids:
                    list_file.write('%s/JPEGImages/%s.jpg'%(os.path.abspath(args.dataset_path),image_id))
                    # Appends " xmin,ymin,xmax,ymax,cls_id" per object and
                    # updates the global per-class counters in `nums`.
                    convert_annotation(image_id, list_file)
                    list_file.write('\n')
            photo_nums[type_index] = len(image_ids)
        print("Generate trainval, train, val and test.txt for train done.")

        def printTable(List1, List2):
            """Print List1 (a list of equal-length columns) as an ASCII
            table, right-justifying column j to width List2[j]."""
            for i in range(len(List1[0])):
                print("|", end=' ')
                for j in range(len(List1)):
                    print(List1[j][i].rjust(int(List2[j])), end=' ')
                    print("|", end=' ')
                print()

        # Summary table: class names next to per-class object counts.
        str_nums = [str(int(x)) for x in nums]
        tableData = [
            classes, str_nums
        ]
        # Each column is as wide as its longest cell (the original's unused
        # `len1` accumulator is dropped).
        colWidths = [max((len(cell) for cell in column), default=0) for column in tableData]
        printTable(tableData, colWidths)

        if photo_nums[0] <= 500:
            print("训练集数量小于500，属于较小的数据量，请注意设置较大的训练世代（Epoch）以满足足够的梯度下降次数（Step）。")

        if np.sum(nums) == 0:
            print("在数据集中并未获得任何目标，请注意修改classes_path对应自己的数据集，并且保证标签名字正确，否则训练将会没有任何效果！")