import logging
import os
import sys

import torch
import yaml

from wenet.utils.config import override_config
from wenet.utils.init_model import init_model
from wenet.utils.train_utils import init_distributed, init_dataset_and_dataloader, check_modify_and_save_config

sys.path.append('/home/work_nfs7/xlgeng/workspace/wenet-sanm')
from wenet.bin.train import main
from wenet.utils.init_kd_model import init_model as init_kd_model
from gxl_ai_utils.utils import utils_file, utils_data

from wenet.utils.init_tokenizer import init_tokenizer
from wenet.dataset import processor
from gxl_ai_utils.thread.my_thread import GxlDynamicThreadPool


def do_test_model():
    """Smoke test: build the knowledge-distillation model from the KD training config and dump its structure."""
    kd_configs = utils_file.load_dict_from_yaml('./conf/train_kd.yaml')
    kd_model = init_kd_model(kd_configs)
    print(kd_model)


def do_test_tokenizer():
    """Round-trip a Chinese sentence through the paraformer tokenizer.

    NOTE: the paraformer symbol table contains only Chinese characters,
    no punctuation symbols.
    """
    args = utils_file.do_dict2simpleNamespaceObj(
        {'symbol_table': './data/units_paraformer.txt'})
    tokenizer = init_tokenizer({}, args.symbol_table)
    input_str = '我爱北京天安门,打发士大夫胜多负少的。发斯蒂芬圣达菲阿发达发热个人共同？'
    tokens, token_ids = tokenizer.tokenize(input_str)
    print(tokens)
    print(token_ids)
    decoded_str, decoded_tokens = tokenizer.detokenize(token_ids)
    print(decoded_str)
    print(decoded_tokens)


def do_test_data():
    """Load one sample wav and print its log-mel-spectrogram and fbank features."""
    path = "E:\\gengxuelong_study\\server_local_adapter\\ai\\data\\small_aishell/test\\BAC009S0724W0133.wav"
    wav, _ = utils_data.torchaudio_load(path)
    data_list = [
        {"key": "BAC009S0724W0133",
         "wav": wav,
         "label": "我是耿雪龙焚烧发电萨芬",
         "sample_rate": 16000}
    ]
    # Run both feature extractors over the same sample; print order is
    # identical to running them back to back (log-mel first, then fbank).
    for feature_stream in (processor.compute_log_mel_spectrogram(data_list),
                           processor.compute_fbank(data_list, 80)):
        for sample in feature_stream:
            print(sample['feat'].shape)
            print(sample['feat'])


def do_test_dataset():
    """Placeholder: dataset test is not implemented yet."""


def do_test_train():
    """Launch wenet training with arguments loaded from a YAML snapshot of argparse."""
    # NOTE(review): sibling functions use utils_file.do_dict2simpleNamespaceObj;
    # confirm utils_data also provides this helper, or unify on one module.
    args_dict = utils_file.load_dict_from_yaml('./argparse_run.yaml')
    main(utils_data.do_dict2simpleNamespaceObj(args_dict))


def prepare_data(dataset_name, data_dir, split_points=(0.7, 0.9)):
    """Build train/dev/test jsonl lists for one dataset.

    Reads ``wav.scp`` and ``text`` from *data_dir*, produces
    ``./data/<dataset_name>/all.list`` and then splits it into
    ``train.list`` / ``dev.list`` / ``test.list``.

    Args:
        dataset_name: name of the dataset; output goes to ./data/<dataset_name>/.
        data_dir: directory containing 'wav.scp' and 'text'.
        split_points: cumulative split fractions (train_end, dev_end);
            the default (0.7, 0.9) reproduces the original 70/20/10 split.
            The split is positional (no shuffling).
    """
    out_dir = f'./data/{dataset_name}'
    utils_file.makedir_sil(out_dir)
    all_list_path = utils_file.join_path(out_dir, 'all.list')
    utils_data.do_make_raw_list(utils_file.join_path(data_dir, 'wav.scp'),
                                utils_file.join_path(data_dir, 'text'),
                                all_list_path)
    dict_list = utils_file.load_dict_list_from_jsonl(all_list_path)
    total_lens = len(dict_list)
    train_end = int(total_lens * split_points[0])
    dev_end = int(total_lens * split_points[1])
    train_list = dict_list[:train_end]
    dev_list = dict_list[train_end:dev_end]
    test_list = dict_list[dev_end:]
    utils_file.write_dict_list_to_jsonl(train_list, utils_file.join_path(out_dir, 'train.list'))
    utils_file.write_dict_list_to_jsonl(dev_list, utils_file.join_path(out_dir, 'dev.list'))
    utils_file.write_dict_list_to_jsonl(test_list, utils_file.join_path(out_dir, 'test.list'))


def make_data_for_server_env():
    """Prepare train/dev/test lists for several datasets in parallel threads.

    Datasets whose all.list already exists are skipped.
    """
    source_root = '/home/work_nfs5_ssd/hfxue/data/data4w/source_1/'
    pool = GxlDynamicThreadPool()
    for name in ['AISHELL-2', 'aishell1', 'LibriSpeech']:
        dataset_dir = utils_file.join_path(source_root, name)
        marker_path = utils_file.join_path(dataset_dir, 'all.list')
        if os.path.exists(marker_path):
            utils_file.logging_print(f'文件{marker_path}已存在,跳过')
            continue
        pool.add_thread(prepare_data, [name, dataset_dir])
    pool.start()


def do_test_inference():
    """Dump the Windows recognize-script argparse defaults to a YAML file."""
    from wenet.bin.recognize import get_args_for_windows
    recognize_args = get_args_for_windows()
    utils_file.write_dict_to_yaml(vars(recognize_args), './argparse_recognize.yaml')


def do_test_init_model():
    """Replay the wenet training bootstrap up to model construction.

    Loads argparse values from ./argparse_run.yaml, then runs the same
    initialization sequence as wenet.bin.train.main: config load/override,
    tokenizer, distributed env, dataloaders, config sanity check, model.
    The statement order mirrors the training entry point and should not
    be rearranged.
    """
    args = utils_data.do_dict2simpleNamespaceObj(utils_file.load_dict_from_yaml('./argparse_run.yaml'))
    logging.basicConfig(level=logging.DEBUG,
                        format='%(asctime)s %(levelname)s %(message)s')

    # Set random seed
    torch.manual_seed(777)

    # Read config
    with open(args.config, 'r') as fin:
        configs = yaml.load(fin, Loader=yaml.FullLoader)
    if len(args.override_config) > 0:
        configs = override_config(configs, args.override_config)

    # written by gxl: during forward passes this config entry is used to
    # decide whether the device is cpu
    configs['train_engine'] = args.train_engine
    # gxl write end

    # init tokenizer
    tokenizer = init_tokenizer(configs, args.symbol_table, args.bpe_model,
                               args.non_lang_syms)

    # Init env for ddp OR deepspeed
    world_size, local_rank, rank = init_distributed(args)

    # Get dataset & dataloader
    train_dataset, cv_dataset, train_data_loader, cv_data_loader = \
        init_dataset_and_dataloader(args, configs, tokenizer)

    # Do some sanity checks and save config to args.model_dir
    configs = check_modify_and_save_config(args, configs, tokenizer.symbol_table)

    # Init asr model from configs
    model, configs = init_model(args, configs)

def do_test_hubert_base():
    """Download HuBERT-base and report its encoder parameter count in millions."""
    from transformers import AutoProcessor, AutoModel
    # processor = AutoProcessor.from_pretrained("facebook/hubert-base-ls960")
    model = AutoModel.from_pretrained("facebook/hubert-base-ls960")

    encoder_param_count = sum(param.numel() for param in model.encoder.parameters())
    utils_file.logging_print(f'hubert_base: {encoder_param_count/1e6}M')
    print(model)

if __name__ == '__main__':
    # Entry point: run the model-initialization smoke test by default;
    # other do_test_* functions can be swapped in here manually.
    do_test_init_model()
    # do_test_hubert_base()