import os
import random

import tqdm
from gxl_ai_utils.utils import utils_file, utils_data
from scipy.io.wavfile import read
from gxl_ai_utils.thread.my_thread import GxlDynamicThreadPool, GxlFixedThreadPool


def get_sample_count_fast(wav_file_path):
    """Return (sample_count, sample_rate) for a WAV file.

    Uses scipy's C-level WAV reader; "fast" relative to full decode pipelines.
    """
    sample_rate, samples = read(wav_file_path)
    return len(samples), sample_rate


def do_for_aishell4():
    """Rewrite the aishell4 wav.scp with absolute paths and copy its transcript.

    Loads wav.scp from the raw dump directory, prefixes every relative wav
    path with the dump root, then writes the new wav.scp plus the untouched
    "text" file into the target scp directory.
    """
    utils_file.logging_print("do_for_aishell4")
    src_dir = '/home/work_nfs7/xlgeng/workspace/wenet_whisper/examples/aishell/s0/dump3/raw/aishell4'
    dst_dir = '/home/work_nfs6/xlgeng/gxl_data/data_scp/aishell4'
    utils_file.makedir_sil(dst_dir)
    path_prefix = "/home/work_nfs7/xlgeng/workspace/wenet_whisper/examples/aishell/s0/"
    wav_scp = utils_file.load_dict_from_scp(utils_file.join_path(src_dir, "wav.scp"))
    # Build a fresh dict instead of mutating the loaded one in place.
    wav_scp = {utt: utils_file.join_path(path_prefix, rel) for utt, rel in wav_scp.items()}
    utils_file.write_dict_to_scp(wav_scp, utils_file.join_path(dst_dir, "wav.scp"))
    utils_file.copy_file(utils_file.join_path(src_dir, "text"),
                         utils_file.join_path(dst_dir, "text"))
    utils_file.logging_print('do_for_aishell4 done')


def little_fun(wav_dict, res_dict, dataset_name):
    """Concurrently filter *wav_dict* values, keeping paths that exist on disk.

    Splits the dict across a dataset-dependent number of workers; each worker
    (little_fun_2) appends existing wav paths to the shared *res_dict* list.

    Args:
        wav_dict: mapping of utterance key -> wav file path.
        res_dict: shared list that collects paths which exist on disk.
        dataset_name: dataset name, expected already lower-cased by the caller.
    """
    print("dataset_name: {}".format(dataset_name))
    # Per-dataset worker counts (the original elif chain compared against
    # "Literal".lower() values); unknown datasets fall back to 10 workers.
    thread_counts = {
        "wenetspeech": 100,
        "aishell4": 70,
        "train_ali_far": 70,
        "train_ali_near": 80,
    }
    num_thread = thread_counts.get(dataset_name, 10)
    print("num_thread: {}".format(num_thread))
    dict_list = utils_file.do_split_dict(wav_dict, num_thread)
    runner = GxlDynamicThreadPool()
    for dict_i in tqdm.tqdm(dict_list):
        runner.add_task(little_fun_2, [dict_i, res_dict])
    runner.start()


def little_fun_2(wav_dict, res_dict):
    """Worker: append every wav path in *wav_dict* that exists on disk to *res_dict*.

    Missing paths are logged as warnings and skipped.
    """
    for _utt, wav_path in tqdm.tqdm(wav_dict.items(), total=len(wav_dict)):
        if os.path.exists(wav_path):
            res_dict.append(wav_path)
        else:
            utils_file.logging_print("warning: wav_path not exists: {}".format(wav_path))


def prepare_data():
    """Collect wav paths from every scp listed in scp_path.txt and write a
    fairseq-style tsv ("all_data_gxl.tsv": first line is the root marker "/",
    then one shuffled wav path per line).

    WenetSpeech scps are randomly subsampled to at most 300000 entries
    because they dwarf the other datasets; every other scp is taken in full.
    Missing scp files are logged and skipped.
    """
    scp_path_list = utils_file.load_list_file_clean("scp_path.txt")
    utils_file.print_list(scp_path_list)
    res_list = []
    for scp_path in scp_path_list:
        utils_file.logging_print('开始处理: scp_path: {}'.format(scp_path))
        if not os.path.exists(scp_path):
            utils_file.logging_print("warning: scp_path not exists: {}".format(scp_path))
            continue
        dataset_name = (scp_path.split("/")[-2]).lower()
        wav_dict = utils_file.load_dict_from_scp(scp_path)
        # The two original branches were identical except for this cap.
        if "WenetSpeech" in scp_path:
            wav_dict = utils_file.get_random_subdict(wav_dict, 300000)
        utils_file.logging_print(f"dataset_name: {dataset_name}, lens: {len(wav_dict)}")
        res_list.extend(wav_dict.values())

    res_file_path = "all_data_gxl.tsv"
    random.shuffle(res_list)
    utils_file.logging_print('开始写入: res_file_path: {}'.format(res_file_path))
    with open(res_file_path, "w") as f:
        f.write("/\n")
        for wav_path in tqdm.tqdm(res_list, total=len(res_list)):
            f.write("{}\n".format(wav_path))


def add_sample_count():
    """Annotate every wav path in all_data_gxl.tsv with its sample count.

    Splits the path list across 200 workers; each worker appends
    "path<TAB>samples" lines to a shared list, which is written to
    all_data_with_sample.tsv with the root marker "/" as its first line.
    NOTE(review): concurrent appends make the output order nondeterministic.
    """
    thread_num = 200
    wav_list = utils_file.load_list_file_clean("./all_data_gxl.tsv")[1:]  # drop root "/" line
    res_list = ["/"]
    runner = GxlDynamicThreadPool()
    for chunk in utils_file.do_split_list(wav_list, thread_num):
        runner.add_task(little_fun_do_get_samples, [res_list, chunk])
    runner.start()
    utils_file.write_list_to_file(res_list, "./all_data_with_sample.tsv")


def little_fun_do_get_samples(res_list, wav_path_list):
    """Worker: append a "path<TAB>sample_count" line to *res_list* per wav path."""
    for path in tqdm.tqdm(wav_path_list, total=len(wav_path_list)):
        sample_count, _sr = utils_data.get_sample_count(path)
        res_list.append(f"{path}\t{sample_count}")


def prepare_dev_data():
    """Build dev/test tsv splits from every scp listed in scp_path.txt.

    Loads all wav paths, shuffles them, then writes the first 1000 to
    ./dev.tsv and the next 500 to ./test.tsv. Each output file starts with
    the root marker line "/". Missing scp files are logged and skipped.
    """
    scp_path_list = utils_file.load_list_file_clean("scp_path.txt")
    utils_file.print_list(scp_path_list)
    res_list = []
    for scp_path in scp_path_list:
        utils_file.logging_print('开始处理: scp_path: {}'.format(scp_path))
        if not os.path.exists(scp_path):
            utils_file.logging_print("warning: scp_path not exists: {}".format(scp_path))
            continue
        wav_dict = utils_file.load_dict_from_scp(scp_path)
        res_list.extend(wav_dict.values())
    random.shuffle(res_list)
    # The two original write loops were byte-for-byte duplicates; factored out.
    _write_split_tsv("./dev.tsv", res_list[:1000])
    _write_split_tsv("./test.tsv", res_list[1000:1500])


def _write_split_tsv(res_file_path, wav_paths):
    """Write *wav_paths* one per line to *res_file_path* after a "/" root line."""
    utils_file.logging_print('开始写入: res_file_path: {}'.format(res_file_path))
    with open(res_file_path, "w") as f:
        f.write("/\n")
        for wav_path in tqdm.tqdm(wav_paths, total=len(wav_paths)):
            f.write("{}\n".format(wav_path))


def fix_data():
    """Append a fixed sample count to every entry of the listed tsv files.

    For each tsv, the first (root marker) line is kept as-is and every
    following line is rewritten in place as "path<TAB>480000".

    NOTE(review): 30 * 16000 assumes every clip is 30 s of 16 kHz audio;
    use add_sample_count() for real per-file counts.
    """
    dev_path = "./dev_.tsv"
    train_path = "./all_data.tsv"  # defined but currently not processed
    path_list = [dev_path]
    fixed_samples = 30 * 16000
    for path_i in path_list:
        with open(path_i, "r") as f:
            lines = f.readlines()
        # Bug fix: the original rebound `path_list` here, shadowing the list
        # being iterated (it only worked because the iterator already existed).
        entry_lines = lines[1:]
        with open(path_i, "w") as f:
            f.write(lines[0])
            for line in entry_lines:
                f.write(f"{line.strip()}\t{fixed_samples}\n")


def get_dev_test():
    """Shuffle all_data_with_sample.tsv and split it into dev/test/train tsvs.

    dev gets 1000 entries, test the next 500, train the remainder; each
    output file starts with the root marker line "/".
    """
    all_train_path = "./all_data_with_sample.tsv"
    entries = utils_file.load_list_file_clean(all_train_path)[1:]  # drop root "/" line
    random.shuffle(entries)
    splits = [
        ("./dev_with_samlpe.tsv", entries[:1000]),
        ("./test_with_samlpe.tsv", entries[1000:1500]),
        ("./train_with_samlpe.tsv", entries[1500:]),
    ]
    for out_path, subset in splits:
        utils_file.write_list_to_file(['/'] + subset, out_path)


if __name__ == "__main__":
    # Script entry point: currently only regenerates the dev/test/train splits
    # from the precomputed all_data_with_sample.tsv.
    get_dev_test()