import glob
import random

from gxl_ai_utils.utils import utils_file
import os

# Dataset names to be skipped (not referenced anywhere in this chunk;
# presumably consumed by other tooling — TODO confirm).
pass_name = ['train_l', "WenetSpeech", "8k_aishell"]
# Dataset name -> human-readable description string ("<hours>, <style>, <noise> ...").
# The values are runtime data (written verbatim into the xlsx/json reports and
# parsed by do_extract_first_number for the hour count), so they are kept
# untouched; only the commented-out entries below are translated.
all_name = {
    '201810-500': " 500h, 室外，噪音",
    "863_rasc": " 863H, 朗读，安静， 偏南方",
    "aishell1": " 150h, 朗读， 安静。标准普通话",
    "AISHELL-2": " 1000h, 朗读，三种场景，各自333h，同一句话让三个人在三种录制环境下朗读。安静，略微吵杂，略微吵杂",
    "AISHELL_ACCENT_SEG": " 约686h(按照每条3.6s), 广西普通话， 极少部分转录不太严谨。安静",
    # "AISHELL-ASR0025": ~500h, Chinese speakers speaking English ------- removed
    "aishell_kefu_4000h": " 4000h, 8K ,  实际场景， 安静，",
    "aishell_speech_data": " 1000h , 演讲， 演讲场景，发布会，有中英混",
    "AISHELL-VPR0061": " 500h, 俺有中英混， 朗读，安静",
    "ANDROID": " 1000h , 朗读， 少量英文单词",
    "Android-100speaker": " 50h, 朗读，安静",
    "ASRU700": " 700h , 朗读，安静",
    # "biaobei_460": 460h, Chinese speakers speaking English ------ removed
    "biaobei900_8k": " 中英混， 朗读，安静, 900h,",
    "CCTV_HUB4_speechin": " ~66h, 新闻联播",
    "CLMix_8k": " 100h, 16K , 中英混",
    # "commonvoice": 36h, unusable ---------------------------
    # "aishell0056_8k": 8k, unusable ---------------- # 500h
    # "8k_aishell": 8k, unusable -------------------- # 300h
    "CSLT_public_data": " 10小时， 新闻主播腔调",
    # "DataTang_1505h": unusable, ~3/10 of the labels are wrong --------------------- removed
    "DataTang_kid": " 儿童朗读， 200h, 安静",
    "datatang_numstr": " 200h, read , quiet, 音频前半段普遍长静音",
    "datatangaccent": " 500h, 北方方言，偏向陕西",
    "DataTang_555_kefu": " 555小时， 客服，电话环境， 8K",
    "datatangread": " 朗读， 8K， 300H",
    "dt300": " 朗读，300h, 重度中英混合，具有多个单词连续出现的情况",
    "duihua_apy_1420h": " 1420h, 生活场景， 安静",
    "duihua_datatangdialogue_985h_seg": " 客服对话， 1000h, 8K , 安静",
    "duihua_huiting_1000h": " 1000h , 生活场景， 安静",
    "ears": " 300h, 生活场景，8K， 有轻微背景杂音，像是采访录音",
    "ench_haoweilai_587h": " 587h ,英语课堂的中英混， 文本需要处理， 所有连续的英文词汇使用_连接",
    "haitian-Android-238speaker": " 70h ,多说话人， 朗读，安静",
    "HTMix_8k": " 150h, 中英混，朗读。",
    "huiting-dongbei": " 50h , read , 较为安静",
    "Huiting300": " 300小时， 生活场景，安静",
    "huiting400": " 400h, 生活场景， 安静",
    "huiting3000": " 3000h, read , quiet",
    "jdd": " 120小时， life, quiet, 8K",
    "kefu_sougou_65h": " 8K , 65H , 客服，",
    "King-ASR-018": " 100h ,新闻播音，安静",
    "King-ASR-018-desktop": " 44.1K , 2h, read ,quiet",
    "King-ASR-059": " 500小时， 新闻，朗读",
    "King-ASR-118-mobile": " 20h , 端侧录音，朗读，具有端侧录音的小噪音，总体安静",
    "King-ASR-166": " 60h,新闻，安静",
    "King-ASR-216": " 1500h, 生活场景，安静",
    # "King-ASR-L-206": 8K, completely unusable ------------------ 20h
    "magicdata755": " 755h, 朗读",
    "MIC": " 1000h , 声量比较小， 轻度中英混，朗读， 安静， 44.1K",
    "news": " 200h,  标准新闻",
    "primewords_md_2018_set1": " 100h ， 朗读， 安静",
    "putonghua500h": " 500h, 安静",
    "tv_split": " 500h, 电视节目的切片， 部分有背景音乐，不分是口音",
    "yuzhiputonghua300_16k": " 300h, 云芝普通话， 标准朗读",
    "yuzhishoujiluyin": " 700H, 8k ,手机录音，轻度中英混",
    # "yuzhiyuyinzhushou600_16k": 8K, 600h, audio was specially processed and
    # sounds unnatural ------ removed
    "yuzhiyuyinzhushou700_16k": " 700h , 朗读，",
    "ztedata": " 10h, 部分含有轻微口音",
    # total USED 28559h + 10000h wenetspeech = 38559h
    "wenetspeech": "10000h, 网络环境",
    "aishell4": " 120h, 会议",
    "Train_Ali_near": " 120h, 会议",
    "Train_Ali_far": " 120h, 会议",
}

# dataset_dir_path_list, _ = utils_file.do_listdir(input_dir)
# all_wav_num = 0
# for dataset_dir_path in dataset_dir_path_list:
#     wav_scp_path_i = os.path.join(dataset_dir_path, "wav.scp")
#     text_scp_path_i = os.path.join(dataset_dir_path, "text")
#     lines_num = utils_file.do_get_file_rows_num_shell(wav_scp_path_i)
#     all_wav_num += lines_num
#     print(f'{dataset_dir_path} wav num: {lines_num}')
# print(f'all wav num: {all_wav_num}')

# Global table: dataset name -> [row_path, shard_path, description, paraformer_wer].
# The 'dataset_name' entry acts as the header row when the dict is dumped to xlsx.
xlsx_dict = {
    'dataset_name': ['row_path', 'shard_path', "discription", "paraformer_wer"],
}
output_xlsx_path = "./chinese_data4W_3.xlsx"
output_json_path = "./chinese_data4W_3.json"
# Resume from an existing spreadsheet produced by a previous run, if any.
if os.path.exists(output_xlsx_path):
    xlsx_dict = utils_file.load_data_from_xlsx(output_xlsx_path, return_cols=False)

input_dir = "/home/work_nfs5_ssd/hfxue/data/data4w/source_1"
wenetspeech_dir = "/home/work_nfs14/xlgeng/asr_data_shard/wenetspeech_new_all"


# Step 1: run paraformer inference and collect the raw ("row") paths.
# Collect the raw wav.scp / text paths for every dataset.
def get_row_path():
    """Fill ``xlsx_dict`` with "<wav.scp>|<text>" raw-path entries for every
    dataset in ``all_name`` and persist the table to xlsx and json.

    Special cases: wenetspeech lives under ``wenetspeech_dir``; the three
    meeting corpora have no known raw path yet and are recorded as "unknown".
    """
    # Meeting corpora whose raw scp files are not available yet.
    no_raw_path_datasets = {"aishell4", "Train_Ali_near", "Train_Ali_far"}
    for dataname, disc_str in all_name.items():
        print(f'handling {dataname}')  # fixed typo: was 'handleing'
        if dataname == "wenetspeech":
            wav_scp_path_i = os.path.join(wenetspeech_dir, "wav.scp")
            text_scp_path_i = os.path.join(wenetspeech_dir, "text")
        elif dataname in no_raw_path_datasets:
            wav_scp_path_i = "unknown"
            text_scp_path_i = "unknown"
        else:
            wav_scp_path_i = os.path.join(input_dir, dataname, "wav.scp")
            text_scp_path_i = os.path.join(input_dir, dataname, "text")
        # Row format consumed by the inference helpers: "<wav.scp path>|<text path>".
        row_path = f"{wav_scp_path_i}|{text_scp_path_i}"
        xlsx_dict[dataname] = [row_path, "unknown", disc_str, "unknown"]
    utils_file.write_dict_to_xlsx(xlsx_dict, output_xlsx_path, cols_pattern=False)
    utils_file.write_dict_to_json(xlsx_dict, output_json_path)


def little_func_refer_from_little_dict(little_dict, gpu_id):
    """Worker: estimate the paraformer WER for each dataset in *little_dict*.

    Samples 1000 random utterances per dataset, runs paraformer inference on
    the given GPU, and stores the resulting WER in the shared ``xlsx_dict``
    (column index 3).

    Args:
        little_dict: slice of ``xlsx_dict`` mapping dataset name ->
            [row_path, shard_path, description, wer].
        gpu_id: index of the GPU used for inference.
    """
    for dataname, value_list in little_dict.items():
        print(f'infering {dataname}')
        row_path = value_list[0]
        # BUG FIX: the original literal was "unknown｜unknown" with a
        # full-width vertical bar (U+FF5C), which can never equal the ASCII
        # "unknown|unknown" produced by get_row_path(); unknown rows slipped
        # through the guard and crashed in load_dict_from_scp.
        if row_path == "unknown|unknown" or "|" not in row_path:
            utils_file.logging_print(f'{dataname} row_path is unknown, skip')
            continue
        wav_scp_path = row_path.split("|")[0]
        text_scp_path = row_path.split("|")[1]
        wav_dict = utils_file.load_dict_from_scp(wav_scp_path)
        text_dict = utils_file.load_dict_from_scp(text_scp_path)
        # Sample 1000 utterances; keep the transcripts aligned with the sample.
        wav_dict_1000 = utils_file.do_get_random_subdict(wav_dict, 1000)
        text_dict_1000 = {key: text_dict[key] for key in wav_dict_1000.keys()}
        # Write the sample to throw-away scp files for the inference tool.
        wav_temp_path = utils_file.do_get_fake_file()
        text_temp_path = utils_file.do_get_fake_file()
        utils_file.write_dict_to_scp(wav_dict_1000, wav_temp_path)
        utils_file.write_dict_to_scp(text_dict_1000, text_temp_path)
        wer_float = utils_file.do_inference_paraformer_return_wer(wav_temp_path, text_temp_path, gpu_id)
        utils_file.remove_file(wav_temp_path)
        utils_file.remove_file(text_temp_path)
        xlsx_dict[dataname][3] = wer_float


def do_paraformer_infer():
    """Compute paraformer WER for all datasets, fanned out across 8 GPUs,
    then persist the updated table to xlsx and json."""
    sub_dicts = utils_file.do_split_dict(xlsx_dict, 8)
    pool = utils_file.GxlDynamicThreadPool()
    # One worker per slice; the slice index doubles as the GPU id.
    for gpu_id, sub_dict in enumerate(sub_dicts):
        pool.add_task(little_func_refer_from_little_dict, [sub_dict, gpu_id])
    pool.run()
    utils_file.write_dict_to_xlsx(xlsx_dict, output_xlsx_path, cols_pattern=False)
    utils_file.write_dict_to_json(xlsx_dict, output_json_path)


def do_infer_one_by_one():
    """Sequentially compute paraformer WER for each dataset on GPU 1.

    Samples 100 utterances per dataset and re-writes the xlsx/json report
    after every dataset so partial progress survives a crash.
    """
    for dataname, value_list in xlsx_dict.items():
        print(f'infering {dataname}')
        row_path = value_list[0]
        if "|" not in row_path or row_path == "unknown|unknown":
            utils_file.logging_print(f'{dataname} row_path is unknown, skip')
            continue
        parts = row_path.split("|")
        wav_scp_path, text_scp_path = parts[0], parts[1]
        print(f'{wav_scp_path} {text_scp_path}')
        full_wav_dict = utils_file.load_dict_from_scp(wav_scp_path)
        full_text_dict = utils_file.load_dict_from_scp(text_scp_path)
        # Sample 100 utterances and keep transcripts aligned with the sample.
        sampled_wavs = utils_file.do_get_random_subdict(full_wav_dict, 100)
        sampled_texts = {utt: full_text_dict[utt] for utt in sampled_wavs}
        tmp_wav_scp = utils_file.do_get_fake_file()
        tmp_text_scp = utils_file.do_get_fake_file()
        utils_file.write_dict_to_scp(sampled_wavs, tmp_wav_scp)
        utils_file.write_dict_to_scp(sampled_texts, tmp_text_scp)
        print(f'{tmp_wav_scp} {tmp_text_scp}')
        wer_float = utils_file.do_inference_paraformer_return_wer(tmp_wav_scp, tmp_text_scp, 1)
        value_list[3] = wer_float
        xlsx_dict[dataname] = value_list
        # Checkpoint after every dataset.
        utils_file.write_dict_to_xlsx(xlsx_dict, output_xlsx_path, cols_pattern=False)
        utils_file.write_dict_to_json(xlsx_dict, output_json_path)



def make_shard():
    """Fill the ``shard_path`` column (index 1) of ``xlsx_dict``.

    Searches three shard directories in priority order for an existing shard
    list per dataset, then merges paraformer WER numbers from an older report
    json and persists the table to xlsx/json.  Datasets found nowhere keep
    their shard_path untouched.
    """
    output_dir = "/home/41_data/data4w/shard_1"
    output_dir2 = "/home/41_data/xlgeng/data/shards"
    output_dir3 = "/home/work_nfs13/yhdai/data/data_shard"
    exsit_datanames = os.listdir(output_dir)
    exsit_datanames2 = os.listdir(output_dir2)
    exist_datanames3 = os.listdir(output_dir3)
    for dataname, value_list in xlsx_dict.items():
        print(f'infering {dataname}')
        # Case 1: shards already produced under output_dir.
        if dataname in exsit_datanames:
            utils_file.logging_print(f'{dataname} already exist111111111111111111, skip')
            if len(glob.glob(os.path.join(output_dir, dataname, "*_list.txt"))) <1:
                # No *_list.txt yet.  For this one dataset the list is rebuilt
                # from the "*.finished" completion markers; every other dataset
                # without a list file is simply skipped.
                if dataname == "duihua_datatangdialogue_985h_seg":
                    dir_path_temp = os.path.join(output_dir, dataname)
                    finished_path_list = glob.glob(os.path.join(dir_path_temp, "*.finished"))
                    # Each "*.finished" marker corresponds to a completed "*.tar" shard.
                    tar_path_list = [item.replace(".finished", ".tar") for item in finished_path_list]
                    shard_file_path = os.path.join(dir_path_temp, 'shards_list.txt')
                    utils_file.write_list_to_file(tar_path_list, shard_file_path)
                    value_list[1] = shard_file_path
                    xlsx_dict[dataname] = value_list
                    utils_file.logging_print('getted shard file path: %s' % shard_file_path)
                    continue
                continue
            shard_file_path = glob.glob(os.path.join(output_dir, dataname, "*_list.txt"))[0]
            value_list[1] = shard_file_path
            xlsx_dict[dataname] = value_list
            utils_file.logging_print('getted shard file path: %s' % shard_file_path)
            continue
        # Case 2: shards under output_dir2; only taken if a *_list.txt exists.
        if dataname in exsit_datanames2:
            utils_file.logging_print(f'{dataname} already exist22222222222222222222, skip')
            if len(glob.glob(os.path.join(output_dir2, dataname, "*_list.txt"))) < 1:
                continue
            shard_file_path = glob.glob(os.path.join(output_dir2, dataname, "*_list.txt"))[0]
            value_list[1] = shard_file_path
            xlsx_dict[dataname] = value_list
            utils_file.logging_print('getted shard file path: %s' % shard_file_path)
            # utils_file.write_dict_to_xlsx(xlsx_dict, output_xlsx_path, cols_pattern=False)
            # utils_file.write_dict_to_json(xlsx_dict, output_json_path)
            continue
        # Case 3: wenetspeech has a fixed, pre-built shard directory.
        if dataname =="wenetspeech":
            value_list[1] = glob.glob(os.path.join("/home/work_nfs14/xlgeng/asr_data_shard/wenetspeech_new_all", "*_list.txt"))[0]
            xlsx_dict[dataname] = value_list
            utils_file.logging_print('getted shard file path: %s' % value_list[1] )
            # utils_file.write_dict_to_xlsx(xlsx_dict, output_xlsx_path, cols_pattern=False)
            # utils_file.write_dict_to_json(xlsx_dict, output_json_path)
            continue
        # Case 4: output_dir3 uses lower-cased dataset directory names.
        dataname1 = dataname.lower()
        if dataname1 in exist_datanames3:
            utils_file.logging_print(f'{dataname} already exist3333333333333333, skip')
            # NOTE(review): the list file is written under output_dir2 while the
            # tars live under output_dir3 — and output_dir2/<dataname> may not
            # exist at this point (case 2 did not match).  Confirm this cross-
            # directory write is intended.
            shard_file_path = os.path.join(output_dir2, dataname, "shards_list.txt")
            tar_path_list = glob.glob(os.path.join(output_dir3, dataname1, "*.tar"))
            utils_file.write_list_to_file(tar_path_list, shard_file_path)
            value_list[1] = shard_file_path
            xlsx_dict[dataname] = value_list
            utils_file.logging_print('getted shard file path: %s' % shard_file_path)
            # utils_file.write_dict_to_xlsx(xlsx_dict, output_xlsx_path, cols_pattern=False)
            # utils_file.write_dict_to_json(xlsx_dict, output_json_path)
            continue

    # Merge paraformer WER numbers from the older report into column 3.
    output_xlsx_path2 = "./chinese_data4W.xlsx"  # NOTE(review): defined but unused
    output_json_path2 = "./chinese_data4W.json"
    paraformer_info_dict = utils_file.load_dict_from_json(output_json_path2)
    for key, value_list in xlsx_dict.items():
        """"""
        if key in paraformer_info_dict:
            paraformer_info = paraformer_info_dict[key][3]
            value_list[3] = paraformer_info
            xlsx_dict[key] = value_list
    utils_file.write_dict_to_xlsx(xlsx_dict, output_xlsx_path, cols_pattern=False)
    utils_file.write_dict_to_json(xlsx_dict, output_json_path)
        # row_path = value_list[0]
        # if row_path == "unknown|unknown" or "|" not in row_path:
        #     utils_file.logging_print(f'{dataname} row_path is unknown, skip')
        #     continue
        # wav_scp_path = row_path.split("|")[0]
        # text_scp_path = row_path.split("|")[1]
        # print(f'{wav_scp_path} {text_scp_path}')
        # output_dir_temp = os.path.join(output_dir, dataname)
        # utils_file.do_make_shard_file(wav_scp_path, text_scp_path, output_dir_temp)


def tongji_wer():
    """Print, for each WER threshold 0..26, the total hours of data whose
    paraformer WER is strictly below the threshold (tongji = statistics)."""
    # The report is threshold-invariant: load it once instead of re-reading
    # the json file on every iteration of the threshold loop.
    dict_res = utils_file.load_dict_from_json('./chinese_data4W.json')
    for wer_bur in range(27):
        total_num = 0
        for dataname, value_list in dict_res.items():
            # The first number in the description string is the dataset size
            # in hours (by convention of all_name's values).
            hours = utils_file.do_extract_first_number(value_list[2])
            wer_num = value_list[3]
            # Skip the header row and datasets whose WER was never computed.
            # (The original duplicated the 'unknown' comparison.)
            if wer_num == 'unknown' or dataname == "dataset_name":
                continue
            if float(wer_num) < wer_bur:
                total_num += hours
        print(f'{wer_bur} total hours {total_num}')

def get_data_list(from_wer_bur):
    """Collect, shuffle, and write the shard-list entries of every dataset
    whose paraformer WER is strictly below *from_wer_bur*.

    Args:
        from_wer_bur: exclusive WER upper bound used to select datasets;
            also embedded in the output file name.
    """
    dict_res = utils_file.load_dict_from_json('./chinese_data4W_3.json')
    dict_res.pop('dataset_name')  # drop the header row
    res_list = []
    output_path = f'./gxl_data/shard_list_from_wer_bur_{from_wer_bur}.txt'
    for dataname, value_list in dict_res.items():
        shard_path = value_list[1]
        wer_num = value_list[3]
        # Skip datasets with no computed WER or no shard list; the original
        # crashed on float('unknown') for such rows.
        if wer_num == 'unknown' or shard_path == 'unknown':
            continue
        if float(wer_num) < from_wer_bur:
            # Only read a shard list when the dataset actually qualifies
            # (the original loaded every list unconditionally).
            res_list += utils_file.load_list_file_clean(shard_path)
    random.shuffle(res_list)
    utils_file.write_list_to_file(res_list, output_path)



if __name__ == "__main__":
    # make_shard()
    # Build the shuffled shard list from datasets with paraformer WER < 10.
    get_data_list(10)




