import os.path
import random

import torch
import tqdm

from gxl_ai_utils.utils import utils_file


def do_split_scp_for_mk_k2_fbank(manifest_dir, fbank_dir, num_split=20):
    """Split the train recordings/supervisions jsonl manifests into
    ``num_split`` shards under ``<fbank_dir>/temp/temp_<i>/``.

    For each shard ``i`` this writes:
      * the i-th chunk of recordings lines (plus a ``.gz`` copy);
      * the matching supervisions, looked up by the shared ``id`` field
        (plus a ``.gz`` copy). Ids with no supervision entry are logged
        and skipped.

    Args:
        manifest_dir: directory holding ``gxldata_recordings_train.jsonl``
            and ``gxldata_supervisions_train.jsonl``.
        fbank_dir: output root; shards go to ``<fbank_dir>/temp/temp_<i>``.
        num_split: number of shards to produce.
    """
    prefix = "gxldata"
    partition = "train"

    # Load the recordings manifest as raw text lines and chunk it.
    recordings_path = os.path.join(manifest_dir, f'{prefix}_recordings_{partition}.jsonl')
    recording_lines = utils_file.load_list_file_clean(recordings_path)
    recording_line_chunks = utils_file.do_split_list(recording_lines, num_split)

    # Index supervisions by id for O(1) lookup per recording.
    supervisions_path = os.path.join(manifest_dir, f'{prefix}_supervisions_{partition}.jsonl')
    supervisions_list = utils_file.load_dict_list_from_jsonl(supervisions_path)
    supervisions_big_dict = {
        d['id']: d for d in tqdm.tqdm(supervisions_list, total=len(supervisions_list))
    }

    for i in utils_file.tqdm(range(num_split), desc='splitting', total=num_split):
        output_dir = os.path.join(fbank_dir, "temp", f'temp_{i}')
        utils_file.makedir_sil(output_dir)

        # Write this shard's recordings jsonl and a gzip copy of it.
        output_path = os.path.join(output_dir, f'{prefix}_recordings_{partition}.jsonl')
        utils_file.write_list_to_file(recording_line_chunks[i], output_path)
        utils_file.do_compress_file_by_gzip(output_path, output_path + '.gz')

        # Re-read the shard as dicts to get the recording ids, then pull
        # the supervisions that belong to those recordings.
        shard_recordings = utils_file.load_dict_list_from_jsonl(output_path)
        shard_ids = [rec['id'] for rec in shard_recordings]
        shard_supervisions = []
        for key in tqdm.tqdm(shard_ids, total=len(shard_ids)):
            if key not in supervisions_big_dict:
                utils_file.logging_print(f'{key} not in supervisions_big_dict')
                continue
            shard_supervisions.append(supervisions_big_dict[key])

        supervisions_out = os.path.join(output_dir, f'{prefix}_supervisions_{partition}.jsonl')
        utils_file.write_dict_list_to_jsonl(shard_supervisions, supervisions_out)
        utils_file.do_compress_file_by_gzip(supervisions_out, supervisions_out + '.gz')


def do_concat_for_k2_fbank(fbank_dir, num_split):
    """Merge the per-shard ``gxldata_cuts_train.jsonl.gz`` files produced
    under ``<fbank_dir>/temp/temp_<i>`` into a single
    ``<fbank_dir>/gxldata_cuts_train.jsonl`` plus a ``.gz`` copy.

    Shards whose cuts file does not exist are skipped silently.

    Args:
        fbank_dir: root directory containing the ``temp`` shard folders.
        num_split: number of shards to look for.
    """
    merged_lines = []
    temp_root = os.path.join(fbank_dir, "temp")
    for i in utils_file.tqdm(range(num_split), desc='merging', total=num_split):
        shard_dir = os.path.join(temp_root, f'temp_{i}')
        utils_file.logging_print("开始处理：", shard_dir)
        gz_path = os.path.join(shard_dir, 'gxldata_cuts_train.jsonl.gz')
        if not os.path.exists(gz_path):
            # Shard may have failed fbank extraction — skip it.
            continue
        utils_file.do_extract_gz(gz_path)
        jsonl_path = os.path.join(shard_dir, 'gxldata_cuts_train.jsonl')
        merged_lines.extend(utils_file.load_list_file_clean(jsonl_path))

    output_file_path = os.path.join(fbank_dir, 'gxldata_cuts_train.jsonl')
    utils_file.write_list_to_file(merged_lines, output_file_path)
    utils_file.do_compress_file_by_gzip(output_file_path, output_file_path + '.gz')


def do_make_fbank_from_split(fbank_dir, num_split=20):
    """Run icefall fbank computation on each shard directory
    ``<fbank_dir>/temp/temp_<i>``.

    Each shard folder serves as both the manifests input directory and the
    fbank output directory for that shard.

    Args:
        fbank_dir: root directory containing the ``temp`` shard folders.
        num_split: number of shards to process.
    """
    temp_dir = os.path.join(fbank_dir, "temp")
    for i in utils_file.tqdm(range(num_split), desc='making', total=num_split):
        shard_dir = os.path.join(temp_dir, f'temp_{i}')
        utils_file.logging_print("开始处理：", shard_dir)
        # Manifests are read from and features written to the same shard dir.
        utils_file._do_compute_fbank4icefall(
            fbank_dir=shard_dir,
            manifests_dir=shard_dir,
        )


def do_make_fbank(manifest_dir, fbank_dir, num_split=20):
    """End-to-end fbank pipeline: shard the manifests, compute fbank
    features per shard, then concatenate the per-shard cuts into one file.

    Args:
        manifest_dir: directory with the source jsonl manifests.
        fbank_dir: working/output directory for features and cuts.
        num_split: number of parallel shards.
    """
    do_split_scp_for_mk_k2_fbank(manifest_dir, fbank_dir, num_split)
    do_make_fbank_from_split(fbank_dir, num_split)
    do_concat_for_k2_fbank(fbank_dir, num_split)

def do_filter(fbank_dir):
    """Filter ``<fbank_dir>/gxldata_cuts_train.jsonl`` in place.

    Drops cuts that are shorter than 0.5 s, have no supervisions, or whose
    first supervision's text is shorter than 2 characters. The surviving
    cuts are rewritten to the same path and re-compressed to ``.jsonl.gz``.

    Args:
        fbank_dir: directory containing ``gxldata_cuts_train.jsonl``.
    """
    cuts_path = os.path.join(fbank_dir, 'gxldata_cuts_train.jsonl')
    cuts = utils_file.load_dict_list_from_jsonl(cuts_path)
    kept = []
    for cut in tqdm.tqdm(cuts, total=len(cuts)):
        # Guard clauses: skip short, empty, or near-empty-text cuts.
        if cut['duration'] < 0.5:
            continue
        supervisions = cut['supervisions']
        if not supervisions:
            continue
        if len(supervisions[0]['text']) < 2:
            continue
        kept.append(cut)
    utils_file.write_dict_list_to_jsonl(kept, cuts_path)
    utils_file.do_compress_file_by_gzip(cuts_path, cuts_path + '.gz')


if __name__ == '__main__':
    # Keep each torch process single-threaded; parallelism is handled at
    # the shard level by the pipeline itself.
    torch.set_num_threads(1)
    torch.set_num_interop_threads(1)
    for partition in ['train', 'dev1', 'dev2']:
        # NOTE(review): only concat + filter run here, so the split and
        # fbank-computation steps (do_make_fbank) are presumably done in an
        # earlier run — confirm. The directory is named "train_fbank" even
        # for the dev partitions; verify this is intended.
        fbank_dir = f"/home/work_nfs13/xlgeng/data/seame/{partition}/k2_data/train_fbank"
        do_concat_for_k2_fbank(fbank_dir, 20)
        do_filter(fbank_dir)

