import config as cfg
def count_xml_categories(xml_path, class_txt_path):
    '''
    Count how many instances of each object class appear across all VOC-style
    XML annotation files in ``xml_path`` and append the tallies to a txt file.

    xml_path: directory containing the *.xml annotation files
    class_txt_path: output txt; one "name:count" line per class is appended
    '''
    import os
    import xml.etree.ElementTree as ET
    classnames = {}
    for filename in os.listdir(xml_path):
        file_path = os.path.join(xml_path, filename)
        dom = ET.parse(file_path)
        root = dom.getroot()
        for obj in root.iter('object'):  # each <object> node is one labelled instance
            tmp_name = obj.find('name').text
            # Bug fix: the first occurrence must count as 1, not 0 — the
            # original initialized new classes to 0, making every total
            # off by one.
            classnames[tmp_name] = classnames.get(tmp_name, 0) + 1

    # NOTE(review): opened in append mode, so repeated runs accumulate lines.
    with open(class_txt_path, 'a') as f:
        for k, v in classnames.items():
            f.write(k + ":" + str(v) + "\n")
    print(classnames)
def normalize_file_name(dir1: str, dir2: str):
    '''
    Make the file basenames of two directories correspond one-to-one.

    Files in ``dir1`` and ``dir2`` sharing the same basename (name without
    extension) are both renamed to a common index-based name; files in
    ``dir1`` with no partner in ``dir2`` are deleted.  Typically used to
    normalize label/image folder pairs and to get rid of irregular names
    (spaces, etc.).
    '''
    import os
    from tqdm import tqdm
    delete_list = []
    renamed1_dict = {}
    renamed2_dict = {}
    file_names1 = os.listdir(dir1)
    file_names2 = os.listdir(dir2)

    for index1, name1 in tqdm(enumerate(file_names1), ncols=100, total=len(file_names1)):
        baseName1 = ''.join(name1.split(".")[:-1])
        name1_path = os.path.join(dir1, name1)
        postfix_name1 = "." + name1.split(".")[-1]
        # Bug fix: initialise the match flag BEFORE the inner loop.  The
        # original assigned it inside the loop body, so an empty dir2 made
        # ``if not flag`` below raise NameError.
        flag = False
        for index2, name2 in enumerate(file_names2):
            baseName2 = ''.join(name2.split(".")[:-1])
            postfix_name2 = "." + name2.split(".")[-1]
            if baseName2 == baseName1:
                # TODO(review): str(index1)+str(index2) can collide
                # (e.g. 1,12 and 11,2 both yield "112"); a separator such as
                # f"{index1}_{index2}" would be safer.
                rename = str(index1) + str(index2)
                renamed1_dict[name1] = rename + postfix_name1
                renamed2_dict[name2] = rename + postfix_name2
                flag = True
                break
        if not flag:
            # No partner in dir2: delete so the two folders stay in 1:1 sync.
            delete_list.append(name1_path)

    for k, v in renamed2_dict.items():
        os.rename(os.path.join(dir2, k), os.path.join(dir2, v))
    for k, v in renamed1_dict.items():
        os.rename(os.path.join(dir1, k), os.path.join(dir1, v))
    for i in delete_list:
        print("Deleted a file.")
        os.remove(i)

def delete_posix(path: str, correct_postfix: str, error_postfix: list):
    '''
    Strip a redundant inner extension from file names in ``path``.

    e.g. with correct_postfix="json" and error_postfix=["jpg"],
    "0x4j340.jpg.json" is renamed to "0x4j340.json".

    path: directory to scan
    correct_postfix: final extension (without the dot); only such files are touched
    error_postfix: unwanted inner extensions to remove
    '''
    import os
    all_fileNames = os.listdir(path)
    suffix = "." + correct_postfix
    for fileName in all_fileNames:
        # Bug fix: match ".json" rather than bare "json" so names such as
        # "foo.xjson" are not mistaken for the target extension.
        if fileName.endswith(suffix):
            file_prefix = fileName[:-len(suffix)]
            file_prefix_postfix = file_prefix.split(".")[-1]
            if file_prefix_postfix in error_postfix:
                new_file_name = "".join(file_prefix.split(".")[:-1])
                new_full_path = os.path.join(path, new_file_name) + suffix
                old_full_path = os.path.join(path, fileName)
                os.rename(old_full_path, new_full_path)

def delete_xml_class(origin_dir, class_path):
    '''
    Remove every <object> whose class name is NOT listed in ``class_path``
    from each XML annotation in ``origin_dir``; delete files left empty.

    origin_dir: directory of VOC-style *.xml files (rewritten in place)
    class_path: txt file listing, one per line, the class names to KEEP
    '''
    import os
    import xml.etree.ElementTree as ET
    from tqdm import tqdm

    with open(class_path) as f:
        class_names = [i.strip() for i in f.readlines()]

    xml_files = os.listdir(origin_dir)
    for filename in tqdm(xml_files):  # iterate over every xml file
        file_path = os.path.join(origin_dir, filename)
        dom = ET.parse(file_path)
        root = dom.getroot()
        # Collect first, remove second: deleting while iterating root.iter()
        # would skip elements.  (Removed the original's unused
        # ``y = root.findall('object')``.)
        del_list = [obj for obj in root.iter('object')
                    if obj.find('name').text not in class_names]
        for i in del_list:
            root.remove(i)
        dom.write(file_path, xml_declaration=True)
        # No <object> left means the annotation is useless - drop the file.
        if len(root.findall('object')) == 0:
            os.remove(file_path)

def delete_xmlAndImg(origin_dir: str, delete_list: dict):
    '''
    Thin out over-represented classes: for each class named as a key of
    ``delete_list``, collect every XML file in ``origin_dir`` that contains an
    instance of it, then randomly delete a quarter of those files.
    '''
    import os
    import random
    import xml.etree.ElementTree as ET
    from tqdm import tqdm

    # Map each target class to the annotation files that mention it.
    for xml_name in tqdm(os.listdir(origin_dir)):
        xml_file = os.path.join(origin_dir, xml_name)
        tree = ET.parse(xml_file)
        for node in tree.getroot().iter('object'):
            class_name = node.find('name').text
            if class_name in delete_list:
                delete_list[class_name].append(xml_file)

    # Deduplicate, then drop 25% of the files for each class.
    for class_name, paths in delete_list.items():
        unique_paths = list(set(paths))
        for doomed in random.sample(unique_paths, len(unique_paths) // 4):
            os.remove(doomed)

def alter_object(xml_path: str, outputNewClassNameTxt: bool, dir_path: str):
    '''
    Rename object classes in every XML annotation according to a fixed
    old-name -> new-name mapping, rewriting the files in place.

    xml_path: directory of *.xml annotation files
    outputNewClassNameTxt: when True, also append the DISTINCT new class
        names to new_class.txt under ``dir_path``
    dir_path: output directory for new_class.txt
    '''
    import os
    import xml.etree.ElementTree as ET
    from tqdm import tqdm

    name2name = {
        "e_licensePlate": "LicensePlate",
        "e_vehicleType": "VehicleBrand",
        "e_vehicleBody": "Vehicle",
        "e_vehicleTripod": "VehicleTripod",
        "e_abs": "ABS",
        "e_fuzhuzhidong": "AuxiliaryBraking",
        "e_asr": "ASR",

        "person": "Person",
        "vehicle": "Vehicle",
        "anquandai": "SafetyBelt",
        "xcjly": "CarDVR",
        "motorback": "MotorBike",
        "emergency_hammer": "EmergencyHammer",
        "fire_extinguisher": "FireExtingusher",
        "fire_extinguisher_meter": "FireExtingusherMeter",
        "IDcardZ": "IDCard",
        "IDcardF": "IDCard",
        "mingpai": "NamePlate",
        "xingshizheng_zhuye": "DrivingLicense",
        "xingshizheng_fuye": "DrivingLicense"}

    len_classes = len(name2name)
    print(f"原本共有 {len_classes} 个类别")

    for filename in tqdm(os.listdir(xml_path)):
        file_path = os.path.join(xml_path, filename)
        dom = ET.parse(file_path)
        root = dom.getroot()
        for obj in root.iter('object'):  # visit the name child of each object node
            name = obj.find('name').text
            if name in name2name:
                obj.find('name').text = name2name[name]  # rewrite the class name
        # Save back to the same file.
        dom.write(file_path, xml_declaration=True)

    if outputNewClassNameTxt:
        new_classes = []
        seen = set()
        for k, v in name2name.items():
            print(f"更改 {k} -> {v}")
            # Bug fix: the original checked ``v not in new_classes`` but the
            # list holds "v\n" entries, so the check never matched and
            # duplicate targets (Vehicle, IDCard, DrivingLicense) were
            # written several times.
            if v not in seen:
                seen.add(v)
                new_classes.append(v + "\n")

        # os.path.join works whether or not dir_path has a trailing slash.
        with open(os.path.join(dir_path, "new_class.txt"), "a") as f:
            f.writelines(new_classes)

def extract_BDD100K_Label(path: str, img_path: str, class_path: str):
    '''
    Convert BDD100K detection labels from one big JSON file into a txt file
    ("label.txt" next to the JSON), one image per line:

        <image_path> x1,y1,x2,y2,class_id x1,y1,x2,y2,class_id ...

    path: BDD100K JSON label file
    img_path: directory prepended to each image file name
    class_path: txt with one class name per line; its line index is the class id

    Raises ValueError when the JSON contains a category missing from class_path.
    '''
    import json
    import os
    from tqdm import tqdm
    with open(class_path) as f:
        class_list = [i.strip() for i in f.readlines()]  # index == class id

    with open(path) as f:
        json_file = json.load(f)

    # Performance fix: open the output once, instead of re-opening it in
    # append mode for every single image as the original did.
    out_txt = os.path.join(os.path.dirname(path), "label.txt")
    with open(out_txt, 'a') as out:
        for img_info in tqdm(json_file):
            a_line = os.path.join(img_path, img_info['name'])

            for img_label in img_info['labels']:
                category = img_label['category']
                if category in class_list:
                    class_id = class_list.index(category)
                else:
                    raise ValueError(f"Class txt has no category: {category}")
                # Only rectangle labels are exported; poly2d etc. are skipped.
                if 'box2d' in img_label:
                    box = img_label['box2d']
                    coords = ",".join(str(round(box[k])) for k in ('x1', 'y1', 'x2', 'y2'))
                    a_line += " " + coords + "," + str(class_id)

            out.write(a_line + "\n")

def split_tra_val_test_data(txt_path: str, split: list):
    '''
    Shuffle the lines of ``txt_path`` and split them into train.txt, val.txt
    and test.txt written next to the input file.

    txt_path: source txt, one sample per line
    split: [train, val, test] proportions out of 10, e.g. [7, 1, 2]
    '''
    import os
    import random

    with open(txt_path) as f:
        lines = f.readlines()

    random.shuffle(lines)
    base_path = os.path.dirname(txt_path)

    n = len(lines)
    train_end = round(n * split[0] / 10)
    val_end = round(n * (split[0] + split[1]) / 10)
    train_lines = lines[:train_end]
    val_lines = lines[train_end:val_end]
    # Bug fix: the original computed test as lines[-round(n*split[2]/10):],
    # which returns ALL lines when split[2] == 0 (a -0 slice) and can overlap
    # the val set through independent rounding.  Taking everything past
    # val_end partitions the data exactly.
    test_lines = lines[val_end:]

    with open(os.path.join(base_path, 'train.txt'), 'w') as train_txt:
        train_txt.writelines(train_lines)
    with open(os.path.join(base_path, 'val.txt'), 'w') as val_txt:
        val_txt.writelines(val_lines)
    with open(os.path.join(base_path, 'test.txt'), 'w') as test_txt:
        test_txt.writelines(test_lines)

def voc_annotation(VOCdevkit_sets, annotation_mode: int = 0, xml_annotation_path: str = '',
                   layout_path: str = '', classes_path: str = '', train_val_txt_path: str = '',
                   image_path: str = '',
                   trainval_percent: float = 0.9, train_percent: float = 0.9):
    '''
    VOC-style dataset bookkeeping.

    annotation_mode 0 or 1: split the xml file names into trainval/train/val/
    test lists written under ``layout_path``.
    annotation_mode 0 or 2: for each set in ``VOCdevkit_sets`` write a training
    txt ("<abs image path> x1,y1,x2,y2,cls ...") under ``train_val_txt_path``.

    trainval_percent: (train+val) : test ratio (modes 0/1 only)
    train_percent: train : val ratio inside trainval (modes 0/1 only)
    '''
    import random
    import os
    from tqdm import tqdm
    from utils.utils import get_classes
    import xml.etree.ElementTree as ET

    classes, _ = get_classes(classes_path)

    def convert_annotation(image_id, list_file):
        # Append every non-difficult, known-class box of one image to list_file.
        # Fix: the xml file handle is now closed via ``with`` (originally leaked).
        with open(os.path.join(xml_annotation_path, '%s.xml' % image_id), encoding='utf-8') as in_file:
            tree = ET.parse(in_file)
        root = tree.getroot()

        for obj in root.iter('object'):
            difficult = 0
            if obj.find('difficult') is not None:
                difficult = obj.find('difficult').text
            cls = obj.find('name').text
            if cls not in classes or int(difficult) == 1:
                continue
            cls_id = classes.index(cls)
            xmlbox = obj.find('bndbox')
            b = (int(float(xmlbox.find('xmin').text)), int(float(xmlbox.find('ymin').text)),
                 int(float(xmlbox.find('xmax').text)), int(float(xmlbox.find('ymax').text)))
            list_file.write(" " + ",".join([str(a) for a in b]) + ',' + str(cls_id))

    random.seed(0)
    if annotation_mode == 0 or annotation_mode == 1:
        print("Generate txt in ImageSets.")
        total_xml = [x for x in os.listdir(xml_annotation_path) if x.endswith(".xml")]

        num = len(total_xml)
        # Fix: the original bound ``list = range(num)``, shadowing the builtin.
        indices = range(num)
        tv = int(num * trainval_percent)
        tr = int(tv * train_percent)
        trainval = random.sample(indices, tv)
        train = random.sample(trainval, tr)
        # Sets give O(1) membership below; output order is unchanged.
        trainval_set = set(trainval)
        train_set = set(train)

        print("train and val size", tv)
        print("train size", tr)
        with open(os.path.join(layout_path, 'trainval.txt'), 'w') as ftrainval, \
             open(os.path.join(layout_path, 'test.txt'), 'w') as ftest, \
             open(os.path.join(layout_path, 'train.txt'), 'w') as ftrain, \
             open(os.path.join(layout_path, 'val.txt'), 'w') as fval:
            for i in tqdm(indices):
                name = total_xml[i][:-4] + '\n'
                if i in trainval_set:
                    ftrainval.write(name)
                    if i in train_set:
                        ftrain.write(name)
                    else:
                        fval.write(name)
                else:
                    ftest.write(name)
        print("Generate txt in ImageSets done.")

    if annotation_mode == 0 or annotation_mode == 2:
        print("Generate train.txt and val.txt for train.")
        for image_set in VOCdevkit_sets:
            # Fix: both handles are closed via ``with``; the original never
            # closed the layout file it read the ids from.
            with open(os.path.join(layout_path, '%s.txt' % image_set), encoding='utf-8') as f:
                image_ids = f.read().strip().split()
            with open(os.path.join(train_val_txt_path, '%s.txt' % image_set), 'w', encoding='utf-8') as list_file:
                for image_id in tqdm(image_ids):
                    list_file.write('%s/%s.jpg' % (os.path.abspath(image_path), image_id))
                    convert_annotation(image_id, list_file)
                    list_file.write('\n')
        print("Generate 2007_train.txt and 2007_val.txt for train done.")


if __name__ == "__main__":
    '''
    ======================================================
    flag = 0 获取xml文件各类别的数目
    ------------------------------------------------------
    flag = 1 删除多余的文件名,例如原文件名为0x4j340.jpg.json，更改为0x4j340.json
    ------------------------------------------------------
    flag = 2 使得两个文件夹下的前缀名一一对应，通常用来对标签文件和图片文件规范化名字，
    也可防止含有空格等不规范名。例如文件夹A:jintian.jpg，文件夹B:jintian.json
    ------------------------------------------------------
    flag = 3 删除xml中某一类
    ------------------------------------------------------
    flag = 4 删除类别数量太多的xml,建议一次删除一个类别的部分图片，然后再次使用flag=0统计数量
    ------------------------------------------------------
    flag = 5 更改xml中某一类名，建议先执行删除，删除掉不想要的类
    ------------------------------------------------------
    flag = 6 处理BDD100K的标签，将一个json读取到一个txt里
    images_path x1,y1,x2,y2,class_id
    ------------------------------------------------------
    flag = 7 将一个txt拆分成tain.txt，val.txt，test.txt
    ------------------------------------------------------
    flag = 8 将类VOC格式的数据集读取到对应的*.txt里，类似VOC要求有Annotations,JPEGImages,Layout
    ======================================================
    '''
    flag = 6

    if flag==0:
        # 获取xml文件各类别的数目
        xml_path = "/media/jiji/3b75ab20-92d3-4be6-81c2-4e1798e2fe16/我的数据集/目标检测数据集/dataset_all/Annotations/"
        class_txt_path = "/media/jiji/3b75ab20-92d3-4be6-81c2-4e1798e2fe16/我的数据集/目标检测数据集/dataset_all/num_class.txt"
        count_xml_categories(xml_path,class_txt_path)
    elif flag==1:
        path = "/media/jiji/3b75ab20-92d3-4be6-81c2-4e1798e2fe16/公司/DeepLabv3_data/test/json"
        delete_posix(path,"json",["jpg","png","jpeg"])
    elif flag==2:
        dir1 = "/media/jiji/3b75ab20-92d3-4be6-81c2-4e1798e2fe16/公司/DeepLabv3_data/test/json(副本)"
        dir2 = "/media/jiji/3b75ab20-92d3-4be6-81c2-4e1798e2fe16/公司/DeepLabv3_data/test/origin_img(副本)"
        normalize_file_name(dir2,dir1)
    elif flag==3:
        origin_dir = '/media/jiji/3b75ab20-92d3-4be6-81c2-4e1798e2fe16/我的数据集/目标检测数据集/dataset_all/Annotations/'  # 设置原始标签路径为 Annos
        class_path = "/media/jiji/3b75ab20-92d3-4be6-81c2-4e1798e2fe16/我的数据集/目标检测数据集/dataset_all/new_class.txt"  # 留下的类别名，路径
        delete_xml_class(origin_dir,class_path)
    elif flag==4:
        origin_dir = '/media/liyan/3b75ab20-92d3-4be6-81c2-4e1798e2fe16/我的数据集/目标检测数据集/dataset_all/Annotations/'  # 设置原始标签路径为 Annos
        delete_list = {'IDCard': []}  # 需要删除的标签列表
        delete_xmlAndImg(origin_dir,delete_list)
    elif flag==5:
        xml_dir = '/media/liyan/3b75ab20-92d3-4be6-81c2-4e1798e2fe16/我的数据集/目标检测数据集/dataset_all/Annotations/'
        output_new_class_name_txt = True  # 是否输出为新的类别名文件夹
        dir_path = "/media/liyan/3b75ab20-92d3-4be6-81c2-4e1798e2fe16/我的数据集/目标检测数据集/dataset_all/"  # 输出的新文件夹
        alter_object(xml_dir,output_new_class_name_txt,dir_path)
    elif flag==6:
        json_path = '/media/jiji/3b75ab20-92d3-4be6-81c2-4e1798e2fe16/private个人数据集/BDD10K/labels/bdd10k_json.json'
        img_path = '/media/jiji/3b75ab20-92d3-4be6-81c2-4e1798e2fe16/private个人数据集/BDD10K/images'
        class_path = '/media/jiji/3b75ab20-92d3-4be6-81c2-4e1798e2fe16/private个人数据集/BDD10K/labels/bdd10k_class.txt'
        extract_BDD100K_Label(json_path,img_path,class_path)
    elif flag==7:
        txt_path = "/media/jiji/3b75ab20-92d3-4be6-81c2-4e1798e2fe16/我的数据集/BDD100K/labels/val_label10K.txt"
        split_tra_val_test_data(txt_path,split=[7,1,2])
    elif flag==8:
        # --------------------------------------------------------------------------------------------------------------------------------#
        #   trainval_percent用于指定(训练集+验证集)与测试集的比例，默认情况下 (训练集+验证集):测试集 = 9:1
        #   train_percent用于指定(训练集+验证集)中训练集与验证集的比例，默认情况下 训练集:验证集 = 9:1
        #   仅在annotation_mode为0和1的时候有效
        # --------------------------------------------------------------------------------------------------------------------------------#
        # -------------------------------------------------------------------#
        #   必须要修改，用于生成2007_train.txt、2007_val.txt的目标信息
        #   与训练和预测所用的classes_path一致即可
        #   如果生成的2007_train.txt里面没有目标信息
        #   那么就是因为classes没有设定正确
        #   仅在annotation_mode为0和2的时候有效
        # -------------------------------------------------------------------#
        # --------------------------------------------------------------------------------------------------------------------------------#
        #   annotation_mode用于指定该文件运行时计算的内容
        #   annotation_mode为0代表整个标签处理过程，包括获得VOCdevkit/VOC2007/ImageSets里面的txt以及训练用的2007_train.txt、2007_val.txt
        #   annotation_mode为1代表获得VOCdevkit/VOC2007/ImageSets里面的txt
        #   annotation_mode为2代表获得训练用的2007_train.txt、2007_val.txt
        # --------------------------------------------------------------------------------------------------------------------------------#
        VOCdevkit_sets = ['train', 'val']
        voc_annotation(VOCdevkit_sets=['train','val'],annotation_mode=0,xml_annotation_path=cfg.all_annotation_path,
                       layout_path=cfg.layout_path,classes_path=cfg.classes_path,train_val_txt_path=cfg.where_to_save_txt_path,
                       image_path=cfg.all_image_path,trainval_percent=0.9,train_percent=0.9)
    else:
        raise ValueError(f"Error Flag :{flag}")



