import glob
import os
import random

from gxl_ai_utils.utils import utils_file


def _match_ignore_case(name, entries, entries_lower):
    """Return the entry from *entries* whose lowercase form equals ``name.lower()``.

    ``entries_lower`` must be ``[e.lower() for e in entries]``. Callers check
    membership before calling, so ``.index`` cannot raise here.
    """
    return entries[entries_lower.index(name.lower())]


def _make_shards_list(source_dir, lists_root):
    """Glob ``*.tar`` under *source_dir*, write the paths to a per-dataset
    ``shards_list.txt`` under *lists_root*, and return that list file's path.
    """
    list_path = os.path.join(lists_root, os.path.basename(source_dir), 'shards_list.txt')
    shard_path_list = glob.glob(os.path.join(source_dir, '*.tar'))
    utils_file.write_list_to_file(shard_path_list, list_path)
    return list_path


def do_prepare():
    """Resolve each dataset named in ``dataset_path_list.yaml`` to the path of
    its ``shards_list.txt``, count the shards it lists, and write the result
    back to ``dataset_path_list.yaml`` as ``{name: {path, num_shard}}``.

    Dataset name matching against the three source directories is
    case-insensitive. hongfei datasets already ship a ``shards_list.txt``;
    for the two wanghe directories the list file is generated by globbing
    ``*.tar`` files.
    """
    dataset_name_list = utils_file.load_dict_from_yaml('dataset_path_list.yaml').keys()
    utils_file.print_list(dataset_name_list)
    hongfei_dir = '/home/41_data/data4w/shard_1'
    wanghe_dir_1 = "/home/41_data/hwang/huawei_cn_en/cn"
    wanghe_dir_2 = "/home/41_data/hwang/huawei_cn_en/cn2"
    hongfei_name_list = os.listdir(hongfei_dir)
    hongfei_name_list_lower = [name.lower() for name in hongfei_name_list]
    wanghe_name_list_1 = os.listdir(wanghe_dir_1)
    wanghe_name_list_1_lower = [name.lower() for name in wanghe_name_list_1]
    wanghe_name_list_2 = os.listdir(wanghe_dir_2)
    wanghe_name_list_2_lower = [name.lower() for name in wanghe_name_list_2]
    res_dict = {}
    gxl_shards_list_dir = "/home/41_data/xlgeng/shard_list_for_hwang"
    os.makedirs(gxl_shards_list_dir, exist_ok=True)
    for now_dataset_name in dataset_name_list:
        print("开始处理如下数据集: " + now_dataset_name)
        now_dataset_dir = ""
        if now_dataset_name.lower() in hongfei_name_list_lower:
            # hongfei datasets already contain a ready-made shards_list.txt.
            matched = _match_ignore_case(now_dataset_name, hongfei_name_list,
                                         hongfei_name_list_lower)
            now_dataset_dir = os.path.join(hongfei_dir, matched, 'shards_list.txt')
        elif now_dataset_name.lower() in wanghe_name_list_1_lower:
            matched = _match_ignore_case(now_dataset_name, wanghe_name_list_1,
                                         wanghe_name_list_1_lower)
            now_dataset_dir = _make_shards_list(os.path.join(wanghe_dir_1, matched),
                                                gxl_shards_list_dir)
        elif now_dataset_name.lower() in wanghe_name_list_2_lower:
            matched = _match_ignore_case(now_dataset_name, wanghe_name_list_2,
                                         wanghe_name_list_2_lower)
            now_dataset_dir = _make_shards_list(os.path.join(wanghe_dir_2, matched),
                                                gxl_shards_list_dir)
        # Datasets not found anywhere keep path="" and num_shard=0.
        num_shard = (len(utils_file.load_list_file_clean(now_dataset_dir))
                     if now_dataset_dir != "" and os.path.exists(now_dataset_dir) else 0)
        res_dict[now_dataset_name] = dict(path=now_dataset_dir, num_shard=num_shard)
        print("完成处理如下数据集: " + now_dataset_name)
    utils_file.print_dict(res_dict)
    utils_file.write_dict_to_yaml(res_dict, 'dataset_path_list.yaml')

def do_test2():
    """For each 'mix' dataset directory, collect its ``*.tar`` shard paths into
    a per-dataset ``shards_list.txt`` under the shared list directory and print
    the generated list file's path.

    Previously-processed cn/cn2 dataset directories were removed from the
    active list once their list files had been generated.
    """
    gxl_shards_list_dir = "/home/41_data/xlgeng/shard_list_for_hwang"
    mix_dataset_dirs = [
        "/home/41_data/hwang/huawei_cn_en/mix/chonglang500mix",
        "/home/41_data/hwang/huawei_cn_en/mix/haoweilai",
        "/home/41_data/hwang/huawei_cn_en/mix/huiting_mix",
        "/home/41_data/hwang/huawei_cn_en/mix/aishell0056",
        "/home/41_data/hwang/huawei_cn_en/mix/biaobei900",
        "/home/41_data/hwang/huawei_cn_en/mix/dt300",
    ]
    for dataset_dir in mix_dataset_dirs:
        now_dataset_dir = os.path.join(gxl_shards_list_dir,
                                       os.path.basename(dataset_dir),
                                       'shards_list.txt')
        shard_path_list = glob.glob(os.path.join(dataset_dir, '*.tar'))
        utils_file.write_list_to_file(shard_path_list, now_dataset_dir)
        print(now_dataset_dir)

def do_test3():
    """Print the total shard count recorded in ``dataset_path_list.yaml``."""
    dataset_info = utils_file.load_dict_from_yaml('dataset_path_list.yaml')
    total = 0
    for entry in dataset_info.values():
        total += entry['num_shard']
    print(total)

def do_test_4():
    """Turn the ``dataset_path_list_en`` scp mapping (name -> list-file path)
    into ``dataset_path_list_en.yaml`` entries of ``{path, num_shard}``,
    counting shards only for list files that exist on disk.
    """
    input_dict = utils_file.load_dict_from_scp('dataset_path_list_en')
    utils_file.print_dict(input_dict)
    output_dict = {
        name: dict(
            path=list_path,
            num_shard=(len(utils_file.load_list_file_clean(list_path))
                       if os.path.exists(list_path) else 0),
        )
        for name, list_path in input_dict.items()
    }
    utils_file.print_dict(output_dict)
    utils_file.write_dict_to_yaml(output_dict, 'dataset_path_list_en.yaml')
def do_make_all_data():
    """Merge every EN and CN dataset's shard list into one shuffled
    ``all_shard_list.txt``, printing the actual vs. recorded shard counts
    as a sanity check.
    """
    cn_input_dict = utils_file.load_dict_from_yaml('dataset_path_list.yaml')
    en_input_dict = utils_file.load_dict_from_yaml('dataset_path_list_en.yaml')
    all_path_list = []
    total_shards = 0
    # EN entries first, then CN, matching the recorded ordering.
    for entry in list(en_input_dict.values()) + list(cn_input_dict.values()):
        all_path_list.append(entry['path'])
        total_shards += entry['num_shard']
    utils_file.print_list(all_path_list)
    all_shard_list = []
    for list_path in all_path_list:
        all_shard_list.extend(utils_file.load_list_file_clean(list_path))
    # Actual count vs. the num_shard sum recorded in the yaml files.
    print(len(all_shard_list))
    print(total_shards)
    random.shuffle(all_shard_list)
    utils_file.write_list_to_file(all_shard_list, 'all_shard_list.txt')

def handle_8k_data():
    """"""
    root_dir = "/home/work_nfs8/xlgeng/data/scp_8k"
    utils_file.logging_print('处理8k数据集， 音频单位的txt和wav')

    # dataset_1_name = "E-commerce_customer_service_Mandarin"
    # dataset_1 = "/home/work_nfs5_ssd/yzli/data/kefu_dianshang_149h/data/data/category/G0001"
    # wav_path_list = glob.glob(f'{dataset_1}/**/*.wav', recursive=True)
    # txt_path_list = glob.glob(f'{dataset_1}/**/*.txt', recursive=True)
    # wav_dict = {}
    # txt_dict = {}
    # for wav_path in wav_path_list:
    #     key = os.path.basename(wav_path).split('.')[0]
    #     wav_dict[key] = wav_path
    # for txt_path in txt_path_list:
    #     key = os.path.basename(txt_path).split('.')[0]
    #     txt_dict[key] = txt_path
    # utils_file.write_dict_to_scp(wav_dict, os.path.join(root_dir, dataset_1_name, 'wav.scp'))
    # utils_file.write_dict_to_scp(txt_dict, os.path.join(root_dir, dataset_1_name, 'text'))

    dataset_2_name = "AISHELL_ASR0035"
    dataset_2 = "/home/work_nfs5_ssd/yzli/data/kefu_asr0035_302h"
    utils_file.copy_file(os.path.join(dataset_2, 'wav.scp'), os.path.join(root_dir, dataset_2_name, 'wav.scp'))
    utils_file.copy_file(os.path.join(dataset_2, 'text'), os.path.join(root_dir, dataset_2_name, 'text'))

    dataset_3_name = "DataTang_555h"
    dataset_3 = "/home/work_nfs5_ssd/yzli/data/DataTang_555_kefu"
    utils_file.copy_file(os.path.join(dataset_3, 'wav.scp'), os.path.join(root_dir, dataset_3_name, 'wav.scp'))
    utils_file.copy_file(os.path.join(dataset_3, 'text'), os.path.join(root_dir, dataset_3_name, 'text'))

    dataset_4_name = "jingdu_chusai"
    dataset_4 = "/home/backup_nfs/Node2/yxkong/data_backup/jdd"
    utils_file.copy_file(os.path.join(dataset_4, 'wav.scp'), os.path.join(root_dir, dataset_4_name, 'wav.scp'))
    utils_file.copy_file(os.path.join(dataset_4, 'text'), os.path.join(root_dir, dataset_4_name, 'text'))

    dataset_5_name = "AISHELL_ACCENT"
    dataset_5 = "/home/work_nfs3/qjshao/qjshao_workspace/DATASET/AISHELL_ACCENT_SEG/kaldi_data"
    wav_dict = {}
    txt_dict = {}
    for set_i in ['train', 'test', 'dev']:
        wav_dict_i = utils_file.load_dict_from_scp(os.path.join(dataset_5, set_i, 'wav.scp'))
        txt_dict_i = utils_file.load_dict_from_scp(os.path.join(dataset_5, set_i, 'text'))
        wav_dict.update(wav_dict_i)
        txt_dict.update(txt_dict_i)
    utils_file.write_dict_to_scp(wav_dict, os.path.join(root_dir, dataset_5_name, 'wav.scp'))
    utils_file.write_dict_to_scp(txt_dict, os.path.join(root_dir, dataset_5_name, 'text'))

    dataset_6_name = "duihua_datatangdialogue_985h_seg"
    dataset_6 = "/home/work_nfs3/pkchen/data/wav/duihua_datatangdialogue_985h_seg"
    utils_file.copy_file(os.path.join(dataset_6, 'wav.scp'), os.path.join(root_dir, dataset_6_name, 'wav.scp'))
    utils_file.copy_file(os.path.join(dataset_6, 'text'), os.path.join(root_dir, dataset_6_name, 'text'))

from gxl_ai_utils.utils import utils_data


def make_shard_for_8k():
    """Pack every dataset's ``wav.scp``/``text`` pair found under ``root_dir``
    into shard files in a matching subdirectory of ``output_dir``."""
    root_dir = "/home/work_nfs8/xlgeng/data/scp_8k"
    output_dir = "/home/41_data/xlgeng/shard_data_for_8k"
    for dataset_name in os.listdir(root_dir):
        utils_file.logging_print('处理如下数据集:', dataset_name)
        src_dir = os.path.join(root_dir, dataset_name)
        utils_data.do_make_shard_file(
            os.path.join(src_dir, 'wav.scp'),
            os.path.join(src_dir, 'text'),
            os.path.join(output_dir, dataset_name),
        )


def add_8k_data():
    """Append the 8 kHz datasets' shard paths to the existing master shard
    list and write the combined list to ``all_shard_list_2.txt``."""
    combined = utils_file.load_list_file_clean('./all_shard_list.txt')
    data_root = "/home/41_data/xlgeng/shard_data_for_8k"
    for dataset_name in ["jingdu_chusai", "E-commerce_customer_service_Mandarin", "AISHELL_ACCENT"]:
        shard_list_path = os.path.join(data_root, dataset_name, 'shards_list.txt')
        combined.extend(utils_file.load_list_file_clean(shard_list_path))
    utils_file.write_list_to_file(combined, './all_shard_list_2.txt')
# Script entry point: only add_8k_data() is currently active; the earlier
# pipeline steps are kept commented for reference.
if __name__ == '__main__':
    # do_prepare()
    # do_test2()
    # do_test3()
    # do_make_all_data()
    add_8k_data()
