import glob
import os
import random
import tarfile

from gxl_ai_utils.utils import utils_file,utils_data

from wenet.utils.init_model import init_model
from wenet.utils.init_tokenizer import init_tokenizer
from wenet.utils.train_utils import check_modify_and_save_config


def do_test():
    """Print a small number modulo a huge modulus, then the modulus itself.

    Quick sanity check that Python handles a 10**20 modulus natively.
    """
    big_modulus = int(1e20)
    remainder = 1342342 % big_modulus
    print(remainder)
    print(big_modulus)

def do_test_1():
    """Convert the ASRU test jsonl list into a wav scp (key -> wav path)."""
    jsonl_path = 'data_list/asru_test.list'
    entries = utils_file.load_dict_list_from_jsonl(jsonl_path)
    # One scp line per jsonl record: the 'key' field maps to the 'wav' path.
    key_to_wav = {entry['key']: entry['wav'] for entry in entries}
    utils_file.write_dict_to_scp(key_to_wav, 'data_list/asru_test.scp')

def do_test_2():
    """Sample roughly 2% of the ASRU test scp into a tiny scp for quick runs.

    Fixes an off-by-one in the original sampling: ``random.randint(0, n)``
    is inclusive of ``n`` (n + 1 possible values), which skews the intended
    2% acceptance probability. A per-entry ``random.random() < rate`` draw
    is an exact Bernoulli(rate) test and avoids recomputing ``len()`` and
    the threshold inside the loop.
    """
    input_scp = 'data_list/asru_test.scp'
    scp_dict = utils_file.load_dict_from_scp(input_scp)
    sample_rate = 0.02  # keep ~2% of the entries
    tiny_scp_dict = {
        key: value
        for key, value in scp_dict.items()
        if random.random() < sample_rate
    }
    utils_file.write_dict_to_scp(tiny_scp_dict, 'data_list/tiny_asru_test.scp')

def do_test_3():
    """Build a ~2% random test split for the haoweilai corpus.

    Reads wav.scp/text from the corpus inventory, samples roughly 2% of the
    entries, and writes data.list (jsonl), wav.scp and text under
    ``data_list/haoweilai_test/``.

    Fixes the original sampling off-by-one: ``random.randint(0, n)`` is
    inclusive of ``n``, slightly skewing the acceptance probability. A
    per-entry uniform draw against a hoisted threshold is exact and keeps
    the loop free of repeated ``len()`` computations.
    """
    input_dir = '/home/work_nfs7/xlgeng/data/asr_data_inventory/haoweilai_587h'
    wav_scp = os.path.join(input_dir, 'wav.scp')
    text_scp = os.path.join(input_dir, 'text')
    all_data_list = utils_file.do_convert_wav_text_scp_to_jsonl(wav_scp, text_scp)
    sample_rate = 0.02  # keep ~2% of the corpus as the test split
    test_data_list = [dic for dic in all_data_list if random.random() < sample_rate]
    utils_file.write_dict_list_to_jsonl(test_data_list, 'data_list/haoweilai_test/data.list')
    # Mirror the sampled subset as parallel wav.scp / text files.
    tiny_scp_dict = {dic['key']: dic['wav'] for dic in test_data_list}
    tiny_text_dict = {dic['key']: dic['txt'] for dic in test_data_list}
    utils_file.write_dict_to_scp(tiny_scp_dict, 'data_list/haoweilai_test/wav.scp')
    utils_file.write_dict_to_scp(tiny_text_dict, 'data_list/haoweilai_test/text')


def extract_and_create_scp(tar_filename):
    """Collect the text transcripts stored inside a tar archive.

    Every ``*.txt`` member is read, UTF-8 decoded and stripped; the member
    name minus its trailing ``.txt`` suffix becomes the key of the result.

    Fixes two defects in the original:
      * ``name.replace('.txt', '')`` removed *every* occurrence of
        ``.txt`` in the member name, not just the trailing suffix
        (e.g. ``a.txt.txt`` collapsed to ``a`` instead of ``a.txt``).
      * ``tar.extractfile`` returns ``None`` for non-regular members
        (directories, links); these are now skipped instead of crashing
        on ``.read()``.

    Args:
        tar_filename: path of the tar archive to scan.

    Returns:
        dict mapping member name (without ``.txt``) -> transcript text.
    """
    text_contents = {}
    with tarfile.open(tar_filename, 'r') as tar:
        for file_name in tar.getnames():
            if not file_name.endswith('.txt'):
                continue
            extracted = tar.extractfile(file_name)
            if extracted is None:  # directory / special entries carry no data
                continue
            key = file_name[:-len('.txt')]  # strip only the trailing suffix
            text_contents[key] = extracted.read().decode('utf-8').strip()
    return text_contents


def do_test_4():
    """Build the huawei test inputs: a shard list plus a transcript scp.

    Globs every ``*.tar`` shard under the fixed input directory, records the
    shard paths, then dumps the transcripts of the first shard as an scp.
    """
    input_dir = '/home/41_data/hwang/huawei_cn_en/testsets/zhenmix_test_01'
    tar_shards = glob.glob(f'{input_dir}/*.tar')
    print(tar_shards)
    out_dir = './data_list/huawei_test'
    os.makedirs(out_dir, exist_ok=True)
    utils_file.write_list_to_file(tar_shards, os.path.join(out_dir, 'test.shards'))
    # Only the first shard's transcripts are extracted for the text scp.
    transcripts = extract_and_create_scp(tar_shards[0])
    print(transcripts)
    utils_file.write_dict_to_scp(transcripts, os.path.join(out_dir, 'text'))
def do_test_model():
    """Build the whisper-small streaming model and print its parameter sizes."""
    configs = utils_file.load_dict_from_yaml('./conf/train_whisper_small_streaming_2.yaml')
    args = utils_file.do_dict2simpleNamespaceObj(utils_file.load_dict_from_yaml('./argparse_run.yaml'))
    utils_file.makedir_sil(args.model_dir)
    # The tokenizer determines the vocabulary size fed into the model config.
    tokenizer = init_tokenizer(configs)
    configs['vocab_size'] = tokenizer.vocab_size()
    # Sanity-check the config and persist it to args.model_dir.
    configs = check_modify_and_save_config(args, configs, tokenizer.symbol_table)
    model, _ = init_model(args, configs)
    # Report parameter counts for the full model and both halves.
    for module in (model, model.encoder, model.decoder):
        utils_file.print_model_size(module)

if __name__ == '__main__':
    # Script entry point: only the model-size check runs by default; the
    # other do_test_* helpers are invoked manually as needed.
    do_test_model()