import os.path
import random

import tqdm

from gxl_ai_utils.utils import utils_file


def do_split2temp(manifest_dir, temp_dir, prefix="gxldata", partition="train", num=20):
    """Split a recordings/supervisions manifest pair into ``num`` shards.

    Reads ``{prefix}_recordings_{partition}.jsonl`` and
    ``{prefix}_supervisions_{partition}.jsonl`` from ``manifest_dir``, splits
    the recording lines into ``num`` chunks, and writes each chunk together
    with its matching supervisions into ``temp_dir/temp_{i}`` (i = 1..num),
    each file also compressed to a sibling ``.jsonl.gz``.

    Args:
        manifest_dir: directory holding the source manifest jsonl files.
        temp_dir: output root; per-shard subdirectories ``temp_1``..``temp_{num}``.
        prefix: manifest filename prefix.
        partition: partition name embedded in the manifest filenames.
        num: number of shards to produce.

    Raises:
        KeyError: if a recording id has no supervision entry with the same id.
    """
    # Load the raw recording jsonl lines and split them into `num` chunks.
    recordings_path = os.path.join(manifest_dir, f'{prefix}_recordings_{partition}.jsonl')
    recording_lines = utils_file.load_list_file_clean(recordings_path)
    recording_chunks = utils_file.do_split_list(recording_lines, num)

    # Index supervisions by id so each shard can look them up in O(1).
    supervisions_path = os.path.join(manifest_dir, f'{prefix}_supervisions_{partition}.jsonl')
    supervision_dicts = utils_file.load_dict_list_from_jsonl(supervisions_path)
    supervisions_by_id = {}
    for supervision in tqdm.tqdm(supervision_dicts, total=len(supervision_dicts)):
        supervisions_by_id[supervision['id']] = supervision

    for i in range(1, num + 1):
        output_dir = os.path.join(temp_dir, f'temp_{i}')
        utils_file.makedir_sil(output_dir)

        # Write this shard's recording lines, then gzip a copy alongside.
        recordings_out = os.path.join(output_dir, f'{prefix}_recordings_{partition}.jsonl')
        utils_file.write_list_to_file(recording_chunks[i - 1], recordings_out)
        utils_file.do_compress_file_by_gzip(recordings_out, recordings_out + '.gz')

        # Re-load the shard we just wrote to obtain parsed dicts, then pull
        # the supervision entry for every recording id in the shard.
        # NOTE(review): this assumes recording ids equal supervision ids —
        # a missing id raises KeyError; confirm against the manifest layout.
        shard_recordings = utils_file.load_dict_list_from_jsonl(recordings_out)
        shard_ids = [recording['id'] for recording in shard_recordings]
        shard_supervisions = []
        for key in tqdm.tqdm(shard_ids, total=len(shard_ids)):
            shard_supervisions.append(supervisions_by_id[key])
        supervisions_out = os.path.join(output_dir, f'{prefix}_supervisions_{partition}.jsonl')
        utils_file.write_dict_list_to_jsonl(shard_supervisions, supervisions_out)
        utils_file.do_compress_file_by_gzip(supervisions_out, supervisions_out + '.gz')

def _collect_cut_manifests(input_dir):
    """Return the paths of every existing ``gxldata_cuts_train.jsonl`` under
    the dataset subdirectories of ``input_dir``; missing ones are reported
    and skipped."""
    utils_file.hello_gxl()
    found = []
    for dataset in os.listdir(input_dir):
        dataset_dir = os.path.join(input_dir, dataset)
        utils_file.logging_print("开始处理：", dataset_dir)
        cut_path = os.path.join(dataset_dir, 'gxldata_cuts_train.jsonl')
        if not os.path.exists(cut_path):
            print(cut_path, "不存在，跳过")
            continue
        found.append(cut_path)
    return found


def do_merge():
    """Collect and print the per-dataset cuts-manifest paths.

    Scans the wenetspeech_1 then wenetspeech_0 fbank directories, gathers
    the path of every existing ``gxldata_cuts_train.jsonl`` and prints the
    combined list. Datasets without that file are skipped (with a notice);
    both directories are handled identically.
    """
    input_dirs = [
        "/home/work_nfs9/xlgeng/new_workspace/gxl_ai_utils/eggs/cats_and_dogs/icefall_datahandle_task/manifest2fbank_draw/data_input/fbank/wenetspeech_1",
        "/home/work_nfs9/xlgeng/new_workspace/gxl_ai_utils/eggs/cats_and_dogs/icefall_datahandle_task/manifest2fbank_draw/data_input/fbank/wenetspeech_0",
    ]
    res_list = []
    for input_dir in input_dirs:
        res_list.extend(_collect_cut_manifests(input_dir))
    utils_file.print_list(res_list)

def do_merge_2():
    """Concatenate the 3000h and wenetspeech cuts manifests, shuffle the
    combined lines in place, and write the result to the 13000h output
    path along with a gzipped copy."""
    source_paths = [
        "/home/work_nfs8/xlgeng/new_workspace/gxl_ai_utils/eggs/cats_and_dogs/icefall_assistant/en_cn/gxl_data/3000h/fbank_common/3000h_cuts_train.jsonl",
        "/home/work_nfs8/xlgeng/new_workspace/gxl_ai_utils/eggs/cats_and_dogs/icefall_datahandle_task/manifest2fbank_draw/data_input/fbank/gxldata_cuts_train.jsonl",
    ]
    target_path = "/home/work_nfs8/xlgeng/new_workspace/gxl_ai_utils/eggs/cats_and_dogs/icefall_assistant/en_cn/gxl_data/13000h/fbank_common/13000h_cuts_train.jsonl"

    merged_lines = []
    for source_path in source_paths:
        merged_lines.extend(utils_file.load_list_file_clean(source_path))
    random.shuffle(merged_lines)
    utils_file.write_list_to_file(merged_lines, target_path)
    utils_file.do_compress_file_by_gzip(target_path, target_path + '.gz')
if __name__ == '__main__':
    # Entry point: currently splits the wenetspeech_0 manifests into 25
    # temp shards; the wenetspeech_1 split and the merge step are kept
    # below as commented-out toggles for later runs.
    input_dir = "/home/work_nfs8/xlgeng/new_workspace/gxl_ai_utils/eggs/cats_and_dogs/icefall_datahandle_task/manifest2fbank_draw/data_input/manifest"
    # First handle the wenetspeech_0 dataset.
    input_dir_1 = os.path.join(input_dir, "wenetspeech_0", "manifest")
    temp_dir_1 = '/home/work_nfs9/xlgeng/new_workspace/gxl_ai_utils/eggs/cats_and_dogs/icefall_datahandle_task/manifest2fbank_draw/data_input/temp/wenetspeech_0'
    do_split2temp(input_dir_1, temp_dir_1, num=25)
    # # Then handle the wenetspeech_1 dataset.
    # input_dir_2 = os.path.join(input_dir, "wenetspeech_1", "manifest")
    # temp_dir_2 = '/home/work_nfs9/xlgeng/new_workspace/gxl_ai_utils/eggs/cats_and_dogs/icefall_datahandle_task/manifest2fbank_draw/data_input/temp/wenetspeech_1'
    # do_split2temp(input_dir_2, temp_dir_2, num=25)
    # do_merge()