import numpy as np
import os, sys, shutil, glob
from tqdm import tqdm as tqdm
import os.path as osp

file_path = os.path.abspath(__file__)
sys.path.append(os.path.abspath(os.path.join(file_path, "..", "..", "..")))
from code_aculat.data_convert.voc2coco import get_categories
from code_aculat.data_analyse.data_analyse_coco import analyse_num_each_class_drop_rare_cat, sorted_dict
from code_aculat.data_convert.voc2yolo import convert_xml2yolo, move_txt_for_category_absence


def random_split(xml_dir=r"/Users/edy/Data/RuPin/钻石碗/数据/BN_data/Annotations",
                 valid_portion=0.25):
    """Randomly split xml annotation files into train/val sets, two folds.

    foldv1 takes the first ``valid_portion`` of the shuffled list as val;
    foldv2 takes the tail past ``int(len * (1 - valid_portion))`` as val
    (the two cut points are computed exactly as before, so fold sizes may
    differ by one file due to flooring).

    Args:
        xml_dir: directory holding the .xml files to split.
        valid_portion: fraction of files assigned to the validation set.

    Side effects: creates ``<parent of xml_dir>/data_split/foldv{1,2}/{train,val}``
    and copies the xml files there.
    """
    out_dir = osp.join(osp.dirname(xml_dir), 'data_split')

    xml_files = [x for x in os.listdir(xml_dir) if x.endswith('.xml')]
    np.random.shuffle(xml_files)

    # hoist the two cut points (the original recomputed them per slice)
    cut1 = int(len(xml_files) * valid_portion)          # foldv1: val is the head
    cut2 = int(len(xml_files) * (1 - valid_portion))    # foldv2: val is the tail

    # fold name -> (train files, val files)
    folds = {
        'foldv1': (xml_files[cut1:], xml_files[:cut1]),
        'foldv2': (xml_files[:cut2], xml_files[cut2:]),
    }

    # copy each subset's xmls into its own folder
    for fold_name, (train_files, val_files) in folds.items():
        for subset, files in (('train', train_files), ('val', val_files)):
            dst_dir = osp.join(out_dir, fold_name, subset)
            os.makedirs(dst_dir, exist_ok=True)  # original crashed when rerun
            for name in files:
                shutil.copy(osp.join(xml_dir, name), osp.join(dst_dir, name))


def check_overlop_of_split():
    """Verify the train and val sets of a split share no xml files.

    Raises:
        RuntimeError: if any xml file appears in both train and val.
    """
    splitted_dir = r"/Users/edy/Data/RuPin/YQSL-RP-static-train-v1/data_split/foldv1"
    # splitted_dir = r"/Users/edy/Data/DB/label_data/data_split/foldv1"
    train_dir = os.path.join(splitted_dir, 'train')
    val_dir = os.path.join(splitted_dir, 'val')

    # sets give O(1) membership tests (the original scanned a list per file)
    val_files = {x for x in os.listdir(val_dir) if x.endswith('.xml')}
    train_files = {x for x in os.listdir(train_dir) if x.endswith('.xml')}

    overlap = val_files & train_files
    if overlap:
        # the original `raise ("...")` raised a TypeError because a plain
        # string is not an exception; raise a real exception type instead
        raise RuntimeError("there overlop  between train and val")


def xml_to_yolo():
    """End-to-end pipeline: convert train/val-split voc xmls to yolo format.

    Steps: optionally detect rare categories to drop, build and persist the
    class-name -> catid mapping, convert the train/val xml folders to yolo
    txts, repair category absence between train and val, and copy the images.
    """
    base_path = r"/Users/edy/Data/RuPin/钻石碗/数据/BN_data"
    whole_xml_dir = osp.join(base_path, "Annotations")
    whole_image_dir = osp.join(base_path, "JPEGImages")
    splited_xml_dir = osp.join(base_path, "data_split", "foldv1")

    # alternative dataset (edit the paths above to switch):
    # whole_xml_dir = r"/Users/edy/Data/DB/label_data/Annotations"
    # whole_image_dir = r"/Users/edy/Data/DB/label_data/Images"
    # splited_xml_dir = r"/Users/edy/Data/DB/label_data/data_split/foldv1"

    drop_rare_cat = False  # when True, classes with fewer than min_num instances are ignored
    min_num = 30
    rare_category_name = []  # pre-declared so it exists even when drop_rare_cat is False
    # manually ignored categories; previous runs used e.g. ['delete'] or ['bottle']
    extra_ignore_categories = []

    out_label_dir = osp.join(osp.dirname(splited_xml_dir), 'data_%s' % osp.basename(splited_xml_dir), "labels")
    out_image_dir = osp.join(osp.dirname(splited_xml_dir), 'data_%s' % osp.basename(splited_xml_dir), "images")

    train_xml_dir = osp.join(splited_xml_dir, 'train')
    val_xml_dir = osp.join(splited_xml_dir, 'val')

    label_train_dir = osp.join(out_label_dir, 'train')
    label_val_dir = osp.join(out_label_dir, 'val')
    txt_out_path = osp.join(out_label_dir, 'class_name_to_catid.txt')
    # no exist_ok on purpose: fail fast instead of mixing fresh labels into a
    # stale output directory
    os.makedirs(label_train_dir)
    os.makedirs(label_val_dir)

    if drop_rare_cat:  # NOTE(review): original comment says dropped classes become 'bottle' downstream — confirm
        json_path = osp.join(osp.dirname(whole_xml_dir), 'voc2coco.json')
        _, rare_category_name = analyse_num_each_class_drop_rare_cat(json_path, False, min_num)
        for x in rare_category_name:
            print("'%s'," % x)

    # optional old-name -> canonical-name merge mapping; currently disabled.
    # previous run merged e.g.:
    #   "东鹏加気能量型营养素饮料罐装355毫升": 'DP11 C355ml-东鹏加气',
    #   "东鹏特饮维生素功能饮料瓶装250毫升": 'DP2 P250ml-金瓶',
    #   "东鹏特饮维生素功能饮料瓶装500毫升": 'DP5 P500ml-金瓶',
    #   "东鹏特饮维生素功能饮料罐装250毫升": 'DP1 C250ml-金罐'
    merge_category = {}

    # rare_category_name doubles as the "categories to ignore" argument below
    for x in extra_ignore_categories:
        if x not in rare_category_name:
            rare_category_name.append(x)

    # build the class-name -> catid mapping from the whole annotation set
    catgory_to_catid = get_categories(glob.glob(osp.join(whole_xml_dir, "*.xml")), rare_category_name, merge_category)
    # NOTE(review): the subtraction assumes merged-away names remain in the
    # dict as extra keys after the first cat_num real classes — confirm
    cat_num = len(catgory_to_catid) - len(merge_category)

    # persist the mapping; name and catid are separated by TWO ASCII spaces
    # (readers split on '  ': output_category_name, convert_voc_to_yolo_Predefine_cate)
    with open(txt_out_path, 'w', encoding='utf-8') as f:
        class_names = list(catgory_to_catid.keys())
        for i in range(cat_num):  # original reused `id` (a builtin) as both index and catid
            f.write("%s  %d\n" % (class_names[i], catgory_to_catid[class_names[i]]))

    # convert the train and val xml folders to yolo txt labels
    convert_xml2yolo(glob.glob(osp.join(train_xml_dir, "*.xml")), label_train_dir, catgory_to_catid)
    convert_xml2yolo(glob.glob(osp.join(val_xml_dir, "*.xml")), label_val_dir, catgory_to_catid)

    # if the train set misses a category, move files from val into train;
    # then mirror those moves on the xml side and copy the matching images
    move_txt_for_category_absence(out_label_dir, cat_num)
    move_xml_for_category_absence(val_xml_dir, train_xml_dir, label_train_dir)
    copy_images_from_txt(whole_image_dir, out_label_dir, out_image_dir)


def copy_images_from_txt(whole_image_dir, label_txt_dir, out_image_dir, img_format=".jpg"):
    """Copy the image matching each label txt into out_image_dir/<subset>.

    Args:
        whole_image_dir: directory holding every image.
        label_txt_dir: labels directory containing per-subset sub-dirs
            (e.g. train/, val/) of yolo txt files.
        out_image_dir: destination root; one sub-dir per subset is created.
        img_format: image extension paired with each txt (default ".jpg").
    """
    for subset in os.listdir(label_txt_dir):  # original called this `dir`, shadowing the builtin
        subset_txt_dir = os.path.join(label_txt_dir, subset)
        if not os.path.isdir(subset_txt_dir):
            continue  # skip stray files such as class_name_to_catid.txt

        dst_img_dir = os.path.join(out_image_dir, subset)
        os.makedirs(dst_img_dir, exist_ok=True)  # original crashed when rerun

        txts = [t for t in os.listdir(subset_txt_dir) if t.endswith('.txt')]
        for txt in tqdm(txts):
            # strip only the '.txt' suffix; str.replace would also rewrite a
            # '.txt' occurring in the middle of a file name
            img_name = txt[:-len('.txt')] + img_format
            shutil.copy(os.path.join(whole_image_dir, img_name), os.path.join(dst_img_dir, img_name))


def out_put_rare_category_and_obs_num():
    """Output category names whose instance count is below a threshold, with counts."""
    import pandas as pd
    json_path = r"/Users/edy/Downloads/YQSL_Fridge_CptYqsl_image_train_v0/voc2coco.json"
    json_path = r"/Users/edy/Downloads/DP-192/voc2coco.json"  # later assignment wins
    _, rare_names, name_to_count = analyse_num_each_class_drop_rare_cat(
        json_path, False, 270, tocsv=True)

    out_to_csv = True
    if out_to_csv:
        # dump the name/count table to a csv file
        cate_num_path = "./dp_cat.csv"
        table = pd.DataFrame({"类名": list(name_to_count.keys()),
                              "目标数": list(name_to_count.values())})
        table.to_csv(cate_num_path)
    else:
        for name, count in name_to_count.items():
            print(name, "  ", count)


def output_category_name(
        txt_path=r"/Users/edy/Data/RuPin/钻石碗/数据/BN_data/data_split/data_foldv1/labels/class_name_to_catid.txt"):
    """Print each category name from a class_name_to_catid txt, quoted and comma-terminated.

    Args:
        txt_path: mapping file with one "<name>  <catid>" record per line
            (two ASCII spaces between the fields).
    """
    # the mapping file is written with utf-8; read with an explicit encoding
    # so non-ASCII class names don't break on a non-utf-8 default locale
    with open(txt_path, 'r', encoding='utf-8') as f:
        records = f.readlines()

    for rec in records:
        cate_name, _cate_id = rec.strip('\n').split('  ')
        print("'%s'," % cate_name)


def move_xml_for_category_absence(origin_xml_dir, dst_xml_dir, dst_txt_dir):
    """Mirror txt moves on the xml side: for every txt in dst_txt_dir whose
    matching xml is missing from dst_xml_dir, move that xml from
    origin_xml_dir into dst_xml_dir.

    Typical call: origin = xml val dir, dst = xml train dir, txt = labels train dir.

    Raises:
        RuntimeError: if the xml and txt counts in the destination
            directories disagree after the moves.
    """
    # set: O(1) membership tests (the original scanned a list per txt)
    dst_xml_files = {x for x in os.listdir(dst_xml_dir) if x.endswith('.xml')}
    move_file_num = 0
    for txt in (x for x in os.listdir(dst_txt_dir) if x.endswith('.txt')):
        # strip only the suffix; str.replace would also rewrite a mid-name '.txt'
        xml_name = txt[:-len('.txt')] + '.xml'

        if xml_name not in dst_xml_files:
            shutil.move(osp.join(origin_xml_dir, xml_name), osp.join(dst_xml_dir, xml_name))
            move_file_num += 1

    print("move xml file number is %d" % move_file_num)

    # sanity check: the xml and txt folders must now hold the same number of files
    n_xml = sum(1 for x in os.listdir(dst_xml_dir) if x.endswith('.xml'))
    n_txt = sum(1 for x in os.listdir(dst_txt_dir) if x.endswith('.txt'))
    if n_xml != n_txt:
        # the original `raise ("...")` raised a TypeError because a plain
        # string is not an exception; raise a real exception type instead
        raise RuntimeError("number of file is error")

def convert_voc_to_yolo_Predefine_cate():
    """Convert voc xmls to yolo txts using a fixed, pre-defined name->catid mapping."""

    class_txt = r"/Users/edy/Data/RC/mixed/data_split/data_foldv1/labels/class_name_to_catid.txt"
    xml_dir = r"/Users/edy/Data/RC/label_data/new_test_t/Annotations"
    output_dir = osp.join(osp.dirname(xml_dir), "out_txt")

    os.makedirs(output_dir, exist_ok=True)
    pre_define_categories = {}
    # the mapping file is written with utf-8 and uses "<name>  <catid>"
    # records (two ASCII spaces); read with an explicit encoding so
    # non-ASCII class names don't break on a non-utf-8 default locale
    with open(class_txt, 'r', encoding='utf-8') as f:
        records = f.readlines()

    for rec in records:
        cate_name, cate_id = rec.strip('\n').split('  ')
        # the writer emits catids with %d (ints); the original kept them as
        # str here, unlike the int-valued mapping used by the other callers
        pre_define_categories[cate_name] = int(cate_id)

    convert_xml2yolo(glob.glob(osp.join(xml_dir, "*.xml")), output_dir, pre_define_categories)

if __name__ == "__main__":
    # Manual entry point: uncomment exactly the step(s) you want to run.
    # random_split()
    # check_overlop_of_split()
    # xml_to_yolo()
    output_category_name()
    # out_put_rare_category_and_obs_num()
    # move_txt_for_category_absence("/Users/edy/Data/yq1-0-show/random_shuffle/data_foldv1/labels",348)
    # move_xml_for_category_absence("/Users/edy/Data/yq1-0-show/random_shuffle/foldv1/val",
    #                               "/Users/edy/Data/yq1-0-show/random_shuffle/foldv1/train",
    #                               "/Users/edy/Data/yq1-0-show/random_shuffle/data_foldv1/labels/train")
    # convert_voc_to_yolo_Predefine_cate()
