import os.path
import random
import sys
import time
# sys.path.insert(0,'../../../../')
from gxl_ai_utils.utils import utils_file
import torch
from prepare_for_bpe_model import prepare_for_bpe_model
from text2segments import text2segments
import gxl_utils
from prepare_words import prepare_words
from train_bbpe_model import train_bbpe_model
def main():
    """Prepare a BBPE lang directory for the multi_zh_en icefall recipe.

    Active steps:
      1. Dump all transcript lines from the source text scp to ``text.list``.
      2. Run :func:`prepare_for_bpe_model` on that list.
      3. Build ``words_no_ids.txt`` from the word-segmented text.
      4. Build ``words.txt`` (words with ids) via :func:`prepare_words`.

    The ``text2segments`` and ``train_bbpe_model`` steps are currently
    disabled (kept below as commented-out records of the full pipeline).

    NOTE(review): step 3 reads ``text_words_segmentation.list``, which is
    only produced by the disabled ``text2segments`` step — as written, this
    will fail unless that file already exists in ``output_dir``. Confirm
    before running from scratch.
    """
    input_text_path = "/home/work_nfs8/xlgeng/new_workspace/icefall/egs/multi_zh_en/ASR/gxl_data/3000h/text"
    vocab_size = 2000
    output_dir = f'/home/work_nfs8/xlgeng/new_workspace/icefall/egs/multi_zh_en/ASR/gxl_data/lang_bbpe_{vocab_size}'
    utils_file.makedir(output_dir)

    utils_file.logging_print('开始准备数据 for bpe model')
    text_dict = utils_file.load_dict_from_scp(input_text_path)
    # Write only the transcript values (no utterance ids) as BPE training text.
    utils_file.write_list_to_file(list(text_dict.values()), os.path.join(output_dir, 'text.list'))
    prepare_for_bpe_model(output_dir, os.path.join(output_dir, 'text.list'))
    utils_file.logging_print('准备数据 for bpe model 完毕')

    # -- disabled step: word segmentation of the raw text ------------------
    # utils_file.logging_print('开始 text2segments')
    # input_file_path = os.path.join(output_dir, 'text.list')
    # output_file_path = os.path.join(output_dir, 'text_words_segmentation.list')
    # text2segments(input_file_path, output_file_path)
    # utils_file.logging_print(' text2segments 完毕')

    utils_file.logging_print('开始生成 words_no_ids.txt')
    input_file_path = os.path.join(output_dir, 'text_words_segmentation.list')
    output_file_path = os.path.join(output_dir, 'words_no_ids.txt')
    gxl_utils.do_get_vocab_no_id_file(input_file_path, output_file_path)
    utils_file.logging_print('生成 words_no_ids.txt 完毕')

    utils_file.logging_print('开始 prepare_words')
    input_file_path = os.path.join(output_dir, 'words_no_ids.txt')
    output_file_path = os.path.join(output_dir, 'words.txt')
    prepare_words(input_file_path, output_file_path)
    utils_file.logging_print(' prepare_words 完毕')

    # -- disabled step: train the BBPE model and prepare the lang dir ------
    # utils_file.logging_print('开始 train_bbpe_model')
    # transcript = os.path.join(output_dir, 'text.list')
    # train_bbpe_model(output_dir, transcript, vocab_size)
    # utils_file.logging_print(' train_bbpe_model 完毕')
    #
    # utils_file.logging_print('开始: prepare_lang_bbpe')
    # utils_file.logging_print('完成,其实没有作为: prepare_lang_bbpe')


def do_fbank4bigdata():
    """Merge every sub-dataset's wav.scp/text under ``data_dir`` into one
    combined set, shuffle it, and run the icefall fbank pipeline on it.

    Sub-directories missing either ``wav.scp`` or ``text`` are logged and
    skipped. Outputs (merged scp files, manifests, fbank features) land
    under ``output_dir``.
    """
    data_dir = "/home/work_nfs5_ssd/hfxue/data/data4w/source_1/"
    output_dir = "/home/work_nfs8/xlgeng/new_workspace/gxl_ai_utils/eggs/cats_and_dogs/icefall_assistant/en_cn/gxl_data/3Wh"
    utils_file.makedir(output_dir)
    res_wav_dict = {}
    res_text_dict = {}
    for dataname in os.listdir(data_dir):
        temp_data_dir = os.path.join(data_dir, dataname)
        wav_path_i = os.path.join(temp_data_dir, 'wav.scp')
        text_path_i = os.path.join(temp_data_dir, 'text')
        # Guard clause: skip incomplete sub-datasets.
        if not (os.path.exists(wav_path_i) and os.path.exists(text_path_i)):
            utils_file.logging_print(dataname, 'not exists')
            continue
        wav_dict_i = utils_file.load_dict_from_scp(wav_path_i)
        text_dict_i = utils_file.load_dict_from_scp(text_path_i)
        res_wav_dict.update(wav_dict_i)
        res_text_dict.update(text_dict_i)
        utils_file.logging_print(dataname, 'len(wav_dict_i):  ', len(wav_dict_i), 'len(text_dict_i):  ', len(text_dict_i))
    # Taking a random "sub"-dict of the full size presumably just shuffles
    # the entry order — TODO confirm against do_get_random_subdict.
    # (Call order kept: text first, then wav, in case they share random state.)
    res_text_dict = utils_file.do_get_random_subdict(res_text_dict, len(res_text_dict))
    res_wav_dict = utils_file.do_get_random_subdict(res_wav_dict, len(res_wav_dict))
    wav_path = os.path.join(output_dir, 'wav.scp')
    text_path = os.path.join(output_dir, 'text')
    utils_file.write_dict_to_scp(res_wav_dict, wav_path)
    utils_file.write_dict_to_scp(res_text_dict, text_path)
    manifest_dir = os.path.join(output_dir, 'manifest')
    utils_file.makedir(manifest_dir)
    fbank_dir = os.path.join(output_dir, 'fbank')
    utils_file.makedir(fbank_dir)
    utils_file.do_make_data4icefall(wav_path, text_path, manifest_dir, fbank_dir, prefix='3W')

def do_fbank4tests():
    """Run the icefall fbank pipeline separately for every test set found
    under ``test_dir``, writing each set's manifests and features to its
    own sub-directory of ``output_dir``.

    Test sets lacking either ``wav.scp`` or ``text`` are logged and
    skipped; a short pause separates consecutive sets.
    """
    test_dir = "/home/work_nfs8/xlgeng/data/scp_test"
    output_dir = "/home/work_nfs8/xlgeng/new_workspace/gxl_ai_utils/eggs/cats_and_dogs/icefall_assistant/en_cn/gxl_data/test_family"
    utils_file.makedir_sil(output_dir)
    # Keep torch single-threaded during feature extraction.
    torch.set_num_threads(1)
    for dataname in os.listdir(test_dir):
        source_dir = os.path.join(test_dir, dataname)
        wav_scp = os.path.join(source_dir, 'wav.scp')
        text_file = os.path.join(source_dir, 'text')
        utils_file.logging_print('开始处理:', dataname)
        # Skip incomplete test sets (no pause for these).
        if not (os.path.exists(wav_scp) and os.path.exists(text_file)):
            utils_file.logging_print(dataname, 'not exists')
            continue
        dest_dir = os.path.join(output_dir, dataname)
        fbank_dir = os.path.join(dest_dir, 'fbank')
        utils_file.makedir_sil(fbank_dir)
        manifest_dir = os.path.join(dest_dir, 'manifest')
        utils_file.makedir_sil(manifest_dir)
        utils_file.do_make_data4icefall(wav_scp, text_file, manifest_dir, fbank_dir, partition='test', prefix=dataname)
        utils_file.logging_print('sleep 3s')
        time.sleep(3)







if __name__ == "__main__":
    # Only the per-test-set fbank pipeline is currently enabled;
    # the big-data merge pipeline below is kept disabled.
    # do_fbank4bigdata()
    do_fbank4tests()