import os, sys, shutil
import numpy as np
import os.path as osp
import glob
from tqdm import tqdm as tqdm

file_path = os.path.abspath(__file__)
sys.path.append(os.path.abspath(os.path.join(file_path, "..", "..", "..")))

from code_aculat.data_analyse.data_analyse_coco import show_bar, sorted_dict, analyse_num_each_class_drop_rare_cat
from code_aculat.utils.xml_process import analyze_xml
from code_aculat.data_convert.voc2coco import get_categories
from code_aculat.data_analyse.data_analyse import analyse_num_each_class_xml


def extract_data():
    "Extract samples of given category names from other datasets (stub, not implemented)."
    pass
    # Get the names of the rare (scarce) categories

    # Read the XML annotations of another dataset
    # Skip an XML if the rare categories' share of its objects is too low
    # The label filter automatically skips non-target categories, so only the xml selection matters here


def get_obs_distr_in_xml():
    """Scan all XML annotation files and select those in which rare-class
    objects exceed a proportion threshold; such images are candidates for
    data augmentation.  Once a rare class has accumulated more than
    ``num_min`` objects it is dropped from further counting.  Finally the
    per-class instance distribution of the selected XMLs is visualized.
    """
    xml_dir = r"/Users/edy/Data/yq1-0-show/yq1_0_show_data_mix_dp/dp_mix_split/foldv1/train"
    json_path = r"/Users/edy/Data/yq1-0-show/yq1_0_show_data_mix_dp"
    portion_value = 0.3  # minimum share of rare-class objects per image
    num_min = 100  # per-class object-count threshold
    json_path = osp.join(json_path, 'voc2coco.json')
    # rare_category_name: names of the rare categories (assumed to be a
    # mutable list -- TODO confirm against analyse_num_each_class_drop_rare_cat)
    _, rare_category_name, = analyse_num_each_class_drop_rare_cat(json_path=json_path, show=False, num_min=num_min)

    target_xml = []  # xml files selected for augmentation
    rare_class_ob_num = {}  # rare class name -> objects accumulated so far
    xml_files = [f for f in os.listdir(xml_dir) if f.endswith(".xml")]

    for xml in xml_files:
        category_list, _ = analyze_xml(osp.join(xml_dir, xml))
        if not category_list:
            continue  # guard: an empty annotation would raise ZeroDivisionError below

        # share of rare-class objects in this image
        rare_class_num = sum(1 for ob_name in category_list if ob_name in rare_category_name)

        if rare_class_num / len(category_list) > portion_value:
            target_xml.append(xml)

            # accumulate the rare-class object counts contributed by this xml
            for ob_name in category_list:
                if ob_name in rare_category_name:
                    rare_class_ob_num[ob_name] = rare_class_ob_num.get(ob_name, 0) + 1

            # Drop every rare class whose accumulated count exceeded num_min.
            # Bug fixes vs. the original:
            #  * it indexed a Python list with a numpy index array
            #    (now_rare_class_name[idx]), which raises TypeError;
            #  * a dropped class stayed in rare_class_ob_num, so later
            #    iterations tried to remove it again, raising ValueError.
            drop_rare_class_name = [name for name, num in rare_class_ob_num.items() if num > num_min]
            for drop_c in drop_rare_class_name:
                rare_category_name.remove(drop_c)
                del rare_class_ob_num[drop_c]  # prevent a repeated removal attempt

    if len(target_xml):  # tune portion_value and just look at how many xml files qualify
        # visualize the per-class instance distribution of the selected xmls
        xml_with_full_path = [osp.join(xml_dir, xml) for xml in target_xml]
        PRE_DEFINE_CATEGORIES = get_categories(xml_with_full_path)  # class name -> cat id

        analyse_num_each_class_xml(xml_with_full_path, PRE_DEFINE_CATEGORIES, show=True)


def get_xml_with_target_category():
    "Scan every xml file; copy each xml that contains at least one category of interest to dst_dir and report how many were copied."
    xml_dir = r"/Users/edy/Data/DB/label_data/drop_left61_mg/Annotations"
    dst_dir = r"/Users/edy/Data/DB/label_data/drop_left61_mg/aug/lqs"
    os.makedirs(dst_dir, exist_ok=True)

    target_category = [
        '依仑大白梨汽水500ml',
        '依仑大连汽水500ml',
        '依仑大连汽水桔子味500ml',
        '依仑大连汽水苹果味500ml',
        '依仑大连荔枝味碳酸饮料500ml',
        '依仑小蜜蜂葡萄果味型碳酸饮料500ml',
        '八王寺山楂香梨维生素果汁汽水380ml',
        '八王寺荔枝维生素果汁汽水380ml',
        '八王寺香橙维生素果汁汽水380ml',
        '大窑嘉宾果味汽水520ml',
        '大窑嘉宾果味汽水PET520ml',
        '大窑橙诺520ml',
        '大窑橙诺PET520ml',
        '宏宝莱果汁鲜山楂味饮料玻璃瓶200ml',
        '宏宝莱果汁鲜桔子味汽水饮料玻璃瓶200ml',
        '宏宝莱果汁鲜脐橙味饮料玻璃瓶200ml',
        '宏宝莱生榨山水蜜桃果味饮料300ml',
        '宏宝莱生榨蓝莓苹果复合果味饮料300ml',
        '宏宝莱生榨香橙果肉饮料300ml',
        '宏宝莱老汽水550ml',
        '宏宝莱花生露植物蛋白饮料445ml',
        '宏宝莱花生露植物蛋白饮料玻璃瓶200ml',
        '宏宝莱荔枝味汽水饮料玻璃瓶200ml',
        '宏宝莱荔枝汽水330ml铝罐',
        '旺仔大白梨果味型碳酸饮料310ml',
        '珍珍桃味汽水330ml',
        '珍珍桔味汽水330ml',
        '珍珍荔枝味汽水330ml',
        '秋林格瓦斯PE瓶350ml',
        '秋林黑格瓦斯PE瓶350ml',
    ]
    # NOTE(review): this deliberately overrides the full list above -- only
    # one class is currently being extracted.
    target_category = ['宏宝莱老汽水550ml', ]
    copied = 0
    xml_files = [name for name in os.listdir(xml_dir) if name.endswith(".xml")]

    # walk every xml and copy it once any object matches a target category
    for xml in tqdm(xml_files):
        category_list, _ = analyze_xml(osp.join(xml_dir, xml))

        if any(ob_name in target_category for ob_name in category_list):
            shutil.copy(osp.join(xml_dir, xml), osp.join(dst_dir, xml))
            copied += 1

    print("xml num is %d" % copied)




def get_aug_images_from_txt():
    """Scan all YOLO label txt files and print those in which target-class
    objects exceed ``portion_value`` of all objects; those images are
    candidates for data augmentation.
    """
    txt_dir = r"/Users/edy/Data/yq1-0-show/yq1_0_show_data_mix_dp/dp_mix_split_ten_dp/data_foldv1/labels/train"
    target_cates = ['DP1 C250ml-金罐',
                    'DP12 C310ml-油柑柠檬茶', 'DP11 C355ml-东鹏加气', 'DP5 P500ml-金瓶']

    portion_value = 0.3  # minimum share of target-class objects per image
    target_txt = []  # qualifying label files
    catid_to_name = {}  # category id -> category name

    # mapping file: one "<name>  <id>" record per line (two-space separated)
    class_txt = osp.join(osp.dirname(txt_dir), 'class_name_to_catid.txt')

    with open(class_txt, 'r') as f:
        records = f.readlines()

    for rec in records:
        cate_name, cate_id = rec.strip('\n').split('  ')
        print("%d: '%s' " % (int(cate_id), cate_name))  # show id -> name mapping for reference
        catid_to_name[int(cate_id)] = cate_name

    # walk every label file and compute the target-class share
    for txt in glob.glob(osp.join(txt_dir, '*.txt')):
        with open(txt, 'r') as f:
            txt_obs = f.readlines()
        all_num = 0
        target_num = 0
        for obs in txt_obs:
            # first whitespace-separated token is the category id; split()
            # (not split(' ')) also tolerates an extra confidence column
            # and trailing whitespace
            fields = obs.split()
            if not fields:
                continue  # skip blank lines
            cate_id = int(fields[0])
            if catid_to_name[cate_id] in target_cates:
                target_num += 1
            all_num += 1
        # guard all_num == 0 so an empty label file cannot crash the scan
        if all_num and target_num / all_num > portion_value:
            target_txt.append(txt)
    print(target_txt)


def get_aug_obs_from_txt():
    """Scan all YOLO label txt files and return those containing more than
    ``num_value`` target-class objects; those images can be cropped and
    re-composited for data augmentation.

    Returns:
        list[str]: paths of the qualifying txt files.
    """
    # txt_dir points at YOLOv5 *detect* output, whose label lines may carry a
    # trailing confidence column (6 fields instead of 5) -- parsing below
    # handles both layouts.
    txt_dir = r"/Users/edy/Desktop/yolov5/runs/detect/exp117/labels"
    # out_dir=osp.join(osp.dirname(txt_dir),'cp_out')
    # os.makedirs(out_dir)

    target_cates = [
        "依仑大白梨汽水500ml",
        "依仑大连汽水500ml",
        "依仑大连汽水桔子味500ml",
        "依仑大连汽水苹果味500ml",
        "依仑大连荔枝味碳酸饮料500ml",
        "依仑小蜜蜂葡萄果味型碳酸饮料500ml",
        "八王寺山楂香梨维生素果汁汽水380ml",
        "八王寺荔枝维生素果汁汽水380ml",
        "八王寺香橙维生素果汁汽水380ml",
        "大窑嘉宾果味汽水520ml",
        "大窑嘉宾果味汽水PET520ml",
        "大窑橙诺520ml",
        "大窑橙诺PET520ml",
        "宏宝莱果汁鲜山楂味饮料玻璃瓶200ml",
        "宏宝莱果汁鲜桔子味汽水饮料玻璃瓶200ml",
        "宏宝莱果汁鲜脐橙味饮料玻璃瓶200ml",
        "宏宝莱生榨山水蜜桃果味饮料300ml",
        "宏宝莱生榨蓝莓苹果复合果味饮料300ml",
        "宏宝莱生榨香橙果肉饮料300ml",
        "宏宝莱老汽水550ml",
        "宏宝莱花生露植物蛋白饮料445ml",
        "宏宝莱花生露植物蛋白饮料玻璃瓶200ml",
        "宏宝莱荔枝味汽水饮料玻璃瓶200ml",
        "宏宝莱荔枝汽水330ml铝罐",
        "旺仔大白梨果味型碳酸饮料310ml",
        "珍珍桃味汽水330ml",
        "珍珍桔味汽水330ml",
        "珍珍荔枝味汽水330ml",
        "秋林格瓦斯PE瓶350ml",
        "秋林黑格瓦斯PE瓶350ml",
        # 'DP12 C310ml-油柑柠檬茶', #'DP11 C355ml-东鹏加气', 'DP5 P500ml-金瓶'
    ]

    num_value = 0  # minimum number of target-class objects per image
    target_txt = []  # qualifying label files
    catid_to_name = {}  # category id -> category name

    class_txt = osp.join(osp.dirname(txt_dir), 'class_name_to_catid.txt')
    # deliberately overrides the derived path above
    class_txt = r"/Users/edy/Data/DB/yq10_db/data_split/data_foldv1/labels/class_name_to_catid.txt"

    with open(class_txt, 'r') as f:
        records = f.readlines()

    for rec in records:
        cate_name, cate_id = rec.strip('\n').split('  ')
        print("%d: '%s' " % (int(cate_id), cate_name))  # show id -> name mapping for reference
        catid_to_name[int(cate_id)] = cate_name

    # walk every label file and count target-class objects
    for txt in glob.glob(osp.join(txt_dir, '*.txt')):
        with open(txt, 'r') as f:
            txt_obs = f.readlines()
        target_num = 0
        for obs in txt_obs:
            # the category id is the first whitespace-separated token; the
            # original's fixed 5-tuple unpack crashed on detect output with
            # a confidence column, and a separate object-count total was
            # computed but never used.
            fields = obs.split()
            if not fields:
                continue  # skip blank lines
            cate_id = int(fields[0])
            if catid_to_name[cate_id] in target_cates:
                target_num += 1
        if target_num > num_value:
            target_txt.append(txt)
    # for i in target_txt:
    #     print(i, "\n")

    return target_txt


def get_aug_obs_from_txt_copy_files():
    """Scan all YOLO label txt files; for every image containing more than
    ``num_value`` target-class objects, copy the image and its label file
    (renamed with ``prefix``) into an output directory for augmentation.
    """
    txt_dir = r"/Users/edy/Data/yq1-0-show/yq1_0_show_data_mix_dp/dp_mix_split_ten_dp/second_train/data_foldv1/labels/train"
    image_dir = r"/Users/edy/Data/yq1-0-show/yq1_0_show_data_mix_dp/dp_mix_split_ten_dp/second_train/data_foldv1/images/train"

    prefix = "pixel_aug_"  # prepended to every copied file name
    out_dir = osp.join(osp.dirname(txt_dir), 'cp_out')
    out_img_dir = osp.join(out_dir, 'images')
    out_txt_dir = osp.join(out_dir, 'labels')
    os.makedirs(out_img_dir, exist_ok=True)
    os.makedirs(out_txt_dir, exist_ok=True)

    target_cates = [
        # 'DP1 C250ml-金罐',
        'DP12 C310ml-油柑柠檬茶',  # 'DP11 C355ml-东鹏加气', 'DP5 P500ml-金瓶'
    ]

    num_value = 6  # minimum number of target-class objects per image
    target_txt = []  # qualifying label files
    catid_to_name = {}  # category id -> category name

    # mapping file: one "<name>  <id>" record per line (two-space separated)
    class_txt = osp.join(osp.dirname(txt_dir), 'class_name_to_catid.txt')

    with open(class_txt, 'r') as f:
        records = f.readlines()

    for rec in records:
        cate_name, cate_id = rec.strip('\n').split('  ')
        print("%d: '%s' " % (int(cate_id), cate_name))  # show id -> name mapping for reference
        catid_to_name[int(cate_id)] = cate_name

    # walk every label file and count target-class objects
    for txt in glob.glob(osp.join(txt_dir, '*.txt')):
        with open(txt, 'r') as f:
            txt_obs = f.readlines()
        target_num = 0
        for obs in txt_obs:
            # the category id is the first whitespace-separated token; split()
            # tolerates an extra confidence column, and the original's unused
            # object-count total has been dropped.
            fields = obs.split()
            if not fields:
                continue  # skip blank lines
            cate_id = int(fields[0])
            if catid_to_name[cate_id] in target_cates:
                target_num += 1
        if target_num > num_value:
            target_txt.append(txt)

    for i in tqdm(target_txt):
        shutil.copy(i, osp.join(out_txt_dir, prefix + osp.basename(i)))
        # assumes the image shares the txt's stem with a .jpg suffix -- TODO confirm
        img_name = osp.basename(i).replace('.txt', '.jpg')
        shutil.copy(osp.join(image_dir, img_name), osp.join(out_img_dir, prefix + img_name))

def copy_xml_for_aug():
    """After pixel-augmented images proved effective in testing, copy each
    augmented image's original XML annotation under the augmented file name
    so the dataset's xml set can be updated too.
    """
    cp_out_dir = r"/Users/edy/Data/yq1-0-show/yq1_0_show_data_mix_dp/cp_out"  # output dir of get_aug_obs_from_txt_copy_files
    origin_Ann_dir = r"/Users/edy/Data/yq1-0-show/yq1_0_show_data_mix_dp/dp_mix_split_ten_dp/foldv1/train"  # original xml dir

    cp_out_img_dir = osp.join(cp_out_dir, 'images')
    # NOTE(review): get_aug_obs_from_txt_copy_files currently writes files with
    # prefix "pixel_aug_" -- confirm which prefix these images actually carry.
    prefix = "mask_aug_"  # prefix set by the earlier augmentation step

    out_Ann_dir = osp.join(cp_out_dir, 'Annotations')
    os.makedirs(out_Ann_dir, exist_ok=True)

    img_suffix = '.jpg'

    for img in tqdm([f for f in os.listdir(cp_out_img_dir) if f.endswith(img_suffix)]):
        # Strip the extension with splitext instead of split('.') so file
        # names containing extra dots map to the correct xml name, and use
        # maxsplit=1 so a stem that repeats the prefix is not mangled.
        stem = osp.splitext(img)[0]
        origin_xml_name = "%s.xml" % stem.split(prefix, 1)[1]
        xml_name = '%s.xml' % stem
        shutil.copy(osp.join(origin_Ann_dir, origin_xml_name), osp.join(out_Ann_dir, xml_name))

def copy_target_xml():
    """For every txt returned by get_aug_obs_from_txt(), copy the matching xml
    into an output directory.  Files whose name starts with "frame" are
    skipped because the corresponding yuanqi-1.0 data is known to be bad.
    """
    xml_dir = r"/Users/edy/Data/dele/find1/Annotations"

    out_dir = osp.join(osp.dirname(xml_dir), 'copy_target_xml_val')
    # deliberately overrides the derived path above
    out_dir = r"/Users/edy/Data/dele/find1/copy_target_xml_train"
    os.makedirs(out_dir, exist_ok=True)

    for txt_path in get_aug_obs_from_txt():
        base_name = osp.basename(txt_path)
        if base_name.split('_')[0] == "frame":
            continue  # filter out the broken frame_* data
        xml_name = base_name.replace('.txt', '.xml')
        shutil.copy(osp.join(xml_dir, xml_name), osp.join(out_dir, xml_name))
        # shutil.move(osp.join(xml_dir, xml_name), osp.join(out_dir, xml_name))

if __name__ == "__main__":
    # One-off data-curation utilities: uncomment the single step to run.
    # get_obs_distr_in_xml()
    # get_aug_images_from_txt()
    # get_aug_obs_from_txt()
    # get_aug_obs_from_txt_copy_files()
    # copy_xml_for_aug()
    copy_target_xml()
    # get_xml_with_target_category()