import glob
import os
import random

import tqdm
from lhotse import CutSet

from gxl_ai_utils.utils import utils_file


#  "recording": -> this is the JSON dict from one line of the recordings manifest.
#  {"id": "IC0956W0195_perturb_fix",
#  "sources": [
#           {
#           "type": "file",
#           "channels": [0],
#           "source": "/home/work_nfs6/disk2/ASR_data/wav/AISHELL-2/IC0956W0195.wav"
#           }
#           ],
# "sampling_rate": 16000,
# "num_samples": 79904,
# "duration": 4.994,
# "channel_ids": [0]
# }

# "supervisions": [
# {  -----> this dict is the JSON from one line of the supervisions manifest
# "id": "IC0956W0195_perturb_fix",
# "recording_id": "IC0956W0195_perturb_fix",
# "start": 0.0,
# "duration": 4.994,
# "channel": 0,
# "text": "但国土部并不会答应地方的要求",
# "language": "Chinese",
# "speaker": "S0901"
# }
# ],

# "id": "IC0956W0195_perturb_fix-0",
# "start": 0, -> always 0
# "duration": 4.994, -> the duration taken from the recordings dict
# "channel": 0, -> always 0
# "type": "MonoCut"

# "features":  --> should not need to be written
# {
# "type": "kaldi-fbank",
# "num_frames": 499,
# "num_features": 80,
# "frame_shift": 0.01,
# "sampling_rate": 16000,
# "start": 0,
# "duration": 4.994,
# "storage_type": "lilcom_chunky",
# "storage_path": "/home/work_nfs8/xlgeng/new_workspace/icefall/egs/multi_zh_en/ASR/gxl_data/3000h/fbank_common/3000h_feats_train/feats-0.lca",
# "storage_key": "0,47250",
# "channels": 0
# },
def do_manifest2cuts(manifest_dir, output_dir, prefix='gxldata', partition='train'):
    """Join lhotse-style recordings and supervisions manifests into a cuts manifest.

    Reads ``{prefix}_recordings_{partition}.jsonl`` and
    ``{prefix}_supervisions_{partition}.jsonl`` from *manifest_dir*, builds one
    MonoCut-shaped dict per recording that has at least one supervision,
    shuffles the cuts, writes them to
    ``{output_dir}/{prefix}_cuts_{partition}.jsonl``, and gzips the result.

    Args:
        manifest_dir: directory containing the two input jsonl manifests.
        output_dir: directory the cuts jsonl (and its .gz) is written into.
        prefix: manifest filename prefix, e.g. ``'gxldata'``.
        partition: dataset split name embedded in the filenames, e.g. ``'train'``.
    """
    # Normalize away trailing slashes so the f-string paths below stay clean.
    manifest_dir = manifest_dir.rstrip('/')
    output_dir = output_dir.rstrip('/')
    utils_file.makedir(output_dir)
    input_manifest_supervisions_path = f"{manifest_dir}/{prefix}_supervisions_{partition}.jsonl"
    input_manifest_recordings_path = f"{manifest_dir}/{prefix}_recordings_{partition}.jsonl"
    output_path = f"{output_dir}/{prefix}_cuts_{partition}.jsonl"

    # Index recordings by their id for O(1) lookup when assembling cuts.
    recordings_dict = {}
    recording_rows = utils_file.load_dict_list_from_jsonl(input_manifest_recordings_path)
    for rec in tqdm.tqdm(recording_rows, total=len(recording_rows)):
        recordings_dict[rec['id']] = rec

    # BUG FIX: the previous version did ``supervisions_dict[recording_id] = row``,
    # keeping only the LAST supervision per recording and silently dropping data
    # when a recording has several supervision segments.  Group all supervisions
    # that belong to the same recording instead — the cut format takes a list.
    supervisions_by_rec = {}
    supervision_rows = utils_file.load_dict_list_from_jsonl(input_manifest_supervisions_path)
    for sup in tqdm.tqdm(supervision_rows, total=len(supervision_rows)):
        supervisions_by_rec.setdefault(sup['recording_id'], []).append(sup)

    res_dict_list = []
    for rec_id in tqdm.tqdm(recordings_dict, total=len(recordings_dict)):
        # Skip recordings with no supervision: they cannot form a useful cut.
        if rec_id not in supervisions_by_rec:
            continue
        rec = recordings_dict[rec_id]
        res_dict_list.append({
            'id': rec_id + '-0',          # lhotse cut ids append a segment index
            'start': 0,                   # whole-recording cut: always starts at 0
            'duration': rec['duration'],
            'channel': 0,
            'supervisions': supervisions_by_rec[rec_id],
            'recording': rec,
            'type': 'MonoCut',
        })
    # Shuffle so downstream consumers see the cuts in random order.
    random.shuffle(res_dict_list)
    utils_file.write_dict_list_to_jsonl(res_dict_list, output_path)
    utils_file.do_compress_file_by_gzip(output_path)



if __name__ == '__main__':
    # Build the train-partition cuts manifest for the wenetspeech_0 data.
    do_manifest2cuts(
        manifest_dir="/home/work_nfs8/xlgeng/new_workspace/gxl_ai_utils/eggs/cats_and_dogs/icefall_assistant/en_cn/gxl_data/wenetspeech_0/manifest",
        output_dir='./data/cuts/wenetspeech_0',
        prefix='gxldata',
        partition='train',
    )
