import glob
import os
import random

import tqdm
from lhotse import CutSet

from gxl_ai_utils.utils import utils_file


#  "recording": -> this dict is the per-line json record from the recordings manifest.
#  {"id": "IC0956W0195_perturb_fix",
#  "sources": [
#           {
#           "type": "file",
#           "channels": [0],
#           "source": "/home/work_nfs6/disk2/ASR_data/wav/AISHELL-2/IC0956W0195.wav"
#           }
#           ],
# "sampling_rate": 16000,
# "num_samples": 79904,
# "duration": 4.994,
# "channel_ids": [0]
# }

# "supervisions": [
# {  -----> this dict is the per-line json record from the supervisions manifest
# "id": "IC0956W0195_perturb_fix",
# "recording_id": "IC0956W0195_perturb_fix",
# "start": 0.0,
# "duration": 4.994,
# "channel": 0,
# "text": "但国土部并不会答应地方的要求",
# "language": "Chinese",
# "speaker": "S0901"
# }
# ],

# "id": "IC0956W0195_perturb_fix-0",
# "start": 0, -> always 0
# "duration": 4.994, -> the duration field from the recording dict
# "channel": 0, -> always 0

# "features":  --> probably not needed here
# {
# "type": "kaldi-fbank",
# "num_frames": 499,
# "num_features": 80,
# "frame_shift": 0.01,
# "sampling_rate": 16000,
# "start": 0,
# "duration": 4.994,
# "storage_type": "lilcom_chunky",
# "storage_path": "/home/work_nfs8/xlgeng/new_workspace/icefall/egs/multi_zh_en/ASR/gxl_data/3000h/fbank_common/3000h_feats_train/feats-0.lca",
# "storage_key": "0,47250",
# "channels": 0
# },


def data_handle(manifest_dir, prefix='gxldata', partition='train', output_dir='./gxl_data/on_the_fly'):
    """Join lhotse-style recordings/supervisions manifests into a cuts jsonl.

    Reads ``{prefix}_recordings_{partition}.jsonl`` and
    ``{prefix}_supervisions_{partition}.jsonl`` from ``manifest_dir``, joins
    them on recording id, writes a shuffled list of MonoCut-shaped dicts to
    ``{output_dir}/{prefix}_cuts_{partition}.jsonl`` and gzips it.

    Args:
        manifest_dir: directory containing the two input manifests.
        prefix: filename prefix shared by the manifest files.
        partition: split name embedded in the filenames (e.g. 'train').
        output_dir: destination directory (created if missing).
    """
    if manifest_dir.endswith('/'):
        manifest_dir = manifest_dir[:-1]
    if output_dir.endswith('/'):
        output_dir = output_dir[:-1]
    utils_file.makedir(output_dir)
    input_manifest_supervisions_path = f"{manifest_dir}/{prefix}_supervisions_{partition}.jsonl"
    input_manifest_recordings_path = f"{manifest_dir}/{prefix}_recordings_{partition}.jsonl"
    output_path = f"{output_dir}/{prefix}_cuts_{partition}.jsonl"

    # Index recordings by their id.
    dict_list = utils_file.load_dict_list_from_jsonl(input_manifest_recordings_path)
    recordings_dict = {d['id']: d for d in tqdm.tqdm(dict_list, total=len(dict_list))}

    # Index supervisions by recording_id.
    # NOTE(review): if one recording carries several supervision segments,
    # only the last one survives here -- assumes a 1:1 mapping; confirm.
    dict_list_2 = utils_file.load_dict_list_from_jsonl(input_manifest_supervisions_path)
    supervisions_dict = {d['recording_id']: d for d in tqdm.tqdm(dict_list_2, total=len(dict_list_2))}

    res_dict_list = []
    for id_i in tqdm.tqdm(recordings_dict.keys(), total=len(recordings_dict)):
        if id_i not in supervisions_dict:
            # Recording without a matching transcript: drop it.
            continue
        recordings_dict_i = recordings_dict[id_i]
        res_dict_i = {
            'id': id_i + '-0',  # cut id convention: "<recording_id>-0"
            'start': 0,  # always a full-recording cut
            'duration': recordings_dict_i['duration'],
            'channel': 0,
            'supervisions': [supervisions_dict[id_i]],
            'recording': recordings_dict_i,
            'type': 'MonoCut',
        }
        res_dict_list.append(res_dict_i)
    random.shuffle(res_dict_list)
    utils_file.write_dict_list_to_jsonl(res_dict_list, output_path)
    utils_file.do_compress_file_by_gzip(output_path)
    # Mirror the dev cuts onto the train path.
    # NOTE(review): when partition == 'train' this overwrites the cuts file
    # that was just produced with the dev cuts -- confirm this is intended.
    dev_path = f"{output_dir}/{prefix}_cuts_dev.jsonl.gz"
    train_path = f"{output_dir}/{prefix}_cuts_train.jsonl.gz"
    if os.path.exists(dev_path):  # avoid crashing when the dev cuts are absent
        utils_file.copy_file(dev_path, train_path)


def hahaha():
    """Compute fbank features for the wenetspeech_0 'train' manifests."""
    base_dir = "/home/work_nfs8/xlgeng/new_workspace/gxl_ai_utils/eggs/cats_and_dogs/icefall_assistant/en_cn/gxl_data/wenetspeech_0"
    manifest_dir = os.path.join(base_dir, 'manifest')
    fbank_dir = os.path.join(base_dir, 'fbank_common')
    # Make sure both working directories exist before feature extraction.
    utils_file.makedir_sil(manifest_dir)
    utils_file.makedir_sil(fbank_dir)
    utils_file._do_compute_fbank4icefall(
        manifests_dir=manifest_dir,
        fbank_dir=fbank_dir,
        partition='train',
        prefix='gxldata',
        perturb_speed=True
    )


def make_cuts_from_scp(input_kaldi_dir, output_dir, prefix='gxldata', patition='train'):
    """Build lhotse recordings/supervisions/cuts manifests from a kaldi dir.

    ``input_kaldi_dir`` must contain ``wav.scp`` and ``text``; ``utt2spk`` is
    generated when missing.  Writes (and gzips)
    ``{prefix}_recordings_{patition}.jsonl``,
    ``{prefix}_supervisions_{patition}.jsonl`` and
    ``{prefix}_cuts_{patition}.jsonl`` into ``output_dir``.

    Args:
        input_kaldi_dir: kaldi-style data directory.
        output_dir: destination directory for the manifests.
        prefix: filename prefix for the manifests.
        patition: split name embedded in the filenames (sic, name kept for
            backward compatibility with existing callers).

    Raises:
        FileNotFoundError: when the input dir, wav.scp or text is missing.
    """
    import lhotse
    # Validate inputs with real exceptions instead of `assert`, which is
    # silently stripped under `python -O`.
    if not os.path.exists(input_kaldi_dir):
        raise FileNotFoundError(f'missing kaldi dir: {input_kaldi_dir}')
    for required in ('wav.scp', 'text'):
        required_path = os.path.join(input_kaldi_dir, required)
        if not os.path.exists(required_path):
            raise FileNotFoundError(f'missing {required_path}')
    if not os.path.exists(os.path.join(input_kaldi_dir, 'utt2spk')):
        utils_file.do_get_utt2spk(input_kaldi_dir)
    utils_file.logging_print("开始load manifests")
    # Sample rate is fixed at 16 kHz for this pipeline.
    recordings, supervisions, _ = lhotse.load_kaldi_data_dir(input_kaldi_dir, 16000)
    utils_file.logging_print("开始fix manifests")
    recordings, supervisions = lhotse.fix_manifests(recordings, supervisions)
    utils_file.logging_print("开始validate manifests")
    lhotse.validate_recordings_and_supervisions(recordings, supervisions)

    def _dump(manifest, kind):
        # Write one manifest to jsonl, then gzip it alongside.
        path = os.path.join(output_dir, f"{prefix}_{kind}_{patition}.jsonl")
        manifest.to_file(path)
        utils_file.do_compress_file_by_gzip(path)

    _dump(recordings, 'recordings')
    _dump(supervisions, 'supervisions')
    _dump(CutSet.from_manifests(recordings=recordings, supervisions=supervisions), 'cuts')


def make_cuts_from_scp_multi_thread(input_kaldi_dir='/home/work_nfs8/xlgeng/new_workspace/all_data_scp/aishell1',
                                    output_dir = './data_output',
                                    prefix='gxldata',
                                    partition='train',
                                    temp_dir = './temp_dir',
                                    thread_num = 100):
    """Shard a kaldi dir into `thread_num` pieces and build cuts in parallel.

    Every shard gets its own wav.scp/text/utt2spk under `temp_dir` plus its
    own output subdirectory under `output_dir`; `make_cuts_from_scp` is run
    on each shard by a dynamic thread pool.
    """
    timer_obj = utils_file.GxlTimer()
    temp_dir = utils_file.do_remove_last_slash(temp_dir)
    output_dir = utils_file.do_remove_last_slash(output_dir)
    utils_file.logging_print("开始load kaldi dir")
    # The input dir must provide both wav.scp and text.
    wav_dict = utils_file.load_dict_from_scp(f'{input_kaldi_dir}/wav.scp')
    text_dict = utils_file.load_dict_from_scp(f'{input_kaldi_dir}/text')
    shards = utils_file.do_split_dict(wav_dict, thread_num)
    runner = utils_file.GxlDynamicThreadPool()
    for idx, shard_wavs in enumerate(shards):
        # Keep only transcripts whose utterance id is in this shard.
        shard_texts = {utt: text_dict[utt] for utt in shard_wavs.keys() if utt in text_dict}
        shard_in_dir = f'{temp_dir}/split_{idx}'
        shard_out_dir = f'{output_dir}/split_{idx}'
        utils_file.makedir_sil(shard_in_dir)
        utils_file.makedir_sil(shard_out_dir)
        utils_file.write_dict_to_scp(shard_wavs, f'{shard_in_dir}/wav.scp')
        utils_file.write_dict_to_scp(shard_texts, f'{shard_in_dir}/text')
        utils_file.do_get_utt2spk(shard_in_dir)
        runner.add_thread(make_cuts_from_scp, [shard_in_dir, shard_out_dir, prefix, partition])
    runner.start()
    timer_obj.stop_halfway()

def hahaha3():
    """Build cuts manifests for the aishell1 scp data with 20 threads."""
    dataset_name = 'aishell1'
    kaldi_dir = f'/home/work_nfs8/xlgeng/new_workspace/all_data_scp/{dataset_name}'
    out_dir = f'./data_output/{dataset_name}'
    utils_file.makedir_sil(out_dir)
    utils_file.do_make_cuts_from_scp_multi_thread(kaldi_dir, out_dir, dataset_name, thread_num=20)
def hahaha4():
    """Build cuts manifests for the AISHELL-2 scp data with 20 threads."""
    dataset_name = 'AISHELL-2'
    kaldi_dir = f'/home/work_nfs8/xlgeng/new_workspace/all_data_scp/{dataset_name}'
    out_dir = f'./data_output/{dataset_name}'
    utils_file.makedir_sil(out_dir)
    utils_file.do_make_cuts_from_scp_multi_thread(kaldi_dir, out_dir, dataset_name, thread_num=20)

def hahaha5():
    """Run multi-thread cut building for every kaldi dir under the test root."""
    output_root = './data_output/test'
    scp_root = '/home/work_nfs8/xlgeng/data/scp_test'
    for dataname in os.listdir(scp_root):
        utils_file.logging_print(f'开始{dataname}')
        kaldi_dir = os.path.join(scp_root, dataname)
        # Skip entries that are not usable kaldi dirs.
        if not os.path.exists(os.path.join(kaldi_dir, 'wav.scp')):
            continue
        out_dir = os.path.join(output_root, dataname)
        utils_file.makedir_sil(out_dir)
        utils_file.do_make_cuts_from_scp_multi_thread(kaldi_dir, out_dir, prefix=dataname, thread_num=20)



import torch

if __name__ == '__main__':
    """"""
    # Entry point: build cuts manifests for every kaldi dir under the scp test root.
    hahaha5()