import copy
import glob
import logging
import os
import random
import sys
import types

import torch
from torch.utils.data import DataLoader

from wenet.dataset.dataset import Dataset
from wenet.transformer.hubert_encoder import S3prlFrontend
from wenet.transformer.whisper_encoder import OpenAIWhisperEncoder
from wenet.utils.init_model import init_model

sys.path.append("/home/work_nfs7/xlgeng/new_workspace/wenet_gxl_salmonn/")
from wenet.utils.init_tokenizer import init_tokenizer
# from gxl_ai_utils.utils import utils_file
# from gxl_ai_utils.config.gxl_config import GxlNode
from wenet.utils.train_utils import init_dataset_and_dataloader


def do_test():
    """Smoke-test the LLM tokenizer: tokenize, round-trip ids, pad/truncate."""
    configs = load_dict_from_yaml("../conf/train_salmonn_v8.yaml")
    llm_tokenizer = init_tokenizer(configs)
    print(llm_tokenizer.vocab_size())
    tokenizer = llm_tokenizer.tokenizer
    print(tokenizer.vocab_size)

    text = "你好，ChatGPT！"

    # text -> tokens -> ids -> tokens -> text round trip.
    tokens = tokenizer.tokenize(text)
    print("Tokenized text:", tokens)
    ids = tokenizer.convert_tokens_to_ids(tokens)
    print("Token IDs:", ids)
    tokens = tokenizer.convert_ids_to_tokens(ids)
    print("Tokens:", tokens)
    decoded_text = tokenizer.decode(ids)
    print("Decoded text:", decoded_text)

    # Special-token ids (may be None depending on the tokenizer type).
    print("CLS Token ID:", tokenizer.cls_token_id)
    print("SEP Token ID:", tokenizer.sep_token_id)

    # Encode with truncation/padding to a fixed length as a torch tensor.
    max_length = 20
    padding = "max_length"  # alternatively "longest"
    truncated_text = tokenizer.encode(
        text, max_length=max_length, padding=padding, return_tensors="pt")
    print("Truncated and Padded Text:", truncated_text)

    # Fetch the underlying vocabulary (symbol table); kept for inspection.
    symbol_table = tokenizer.get_vocab()
    # print("Symbol Table:", symbol_table)


def do_test2():
    input = torch.randn(10, 100)
    input = input.to(torch.float32)
    model = torch.nn.Linear(100, 10)
    for param in model.parameters():
        param.data = param.data.to(torch.float16)
    output = model(input)
    print(output.dtype)

import yaml


def load_dict_from_yaml(file_path: str):
    """Read a UTF-8 YAML file and return its parsed contents (a dict here)."""
    # NOTE(review): FullLoader can construct some Python objects. Fine for
    # trusted local config files, but prefer yaml.safe_load for untrusted input.
    with open(file_path, 'rt', encoding='utf-8') as fin:
        loaded = yaml.load(fin, Loader=yaml.FullLoader)
    return loaded

def do_test_dataset():
    """Build train/cv datasets from yaml configs and print a few batch shapes."""
    logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    configs = load_dict_from_yaml("../configs.yaml")
    # Wrap the args dict so attribute access works like argparse's Namespace.
    args = types.SimpleNamespace(**load_dict_from_yaml('../args_train.yaml'))
    tokenizer = init_tokenizer(configs)
    logging.info("Vocab size: {}".format(tokenizer.vocab_size()))

    train_dataset, cv_dataset, train_data_loader, cv_data_loader = \
        init_dataset_and_dataloader(args, configs, tokenizer)
    # Print tensor shapes for the first ~11 batches; 'keys' are ids, not tensors.
    for batch_idx, batch in enumerate(train_data_loader):
        for field, value in batch.items():
            if field != 'keys':
                print(field, value.shape)
        print('------------------------------------')
        if batch_idx >= 10:
            break

def do_test_recognize():
    """Smoke-test recognition: build a deterministic test dataset, print a few
    batch shapes, then decode with the model in 'salmonn_decode' mode and
    write hypotheses to ``<result_dir>/text_hyp``.

    Bug fix: ``hyp`` was referenced at the bottom of the decode loop even when
    ``model.generate`` raised ``RuntimeError`` (or when ``args.mode`` was not
    'salmonn_decode'), causing a ``NameError`` on the first batch or writing a
    stale hypothesis under the wrong keys. Such batches are now skipped.
    """
    args = types.SimpleNamespace(**load_dict_from_yaml('../args_recognize.yaml'))
    configs = load_dict_from_yaml('/home/work_nfs7/xlgeng/bsmu_template/exp/salmonn_v8_lr5e_5/train.yaml')
    logging.basicConfig(level=logging.DEBUG,
                        format='%(asctime)s %(levelname)s %(message)s')
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)

    # Derive a deterministic eval config from the train dataset config:
    # effectively unbounded length filters, no augmentation, no shuffling.
    test_conf = copy.deepcopy(configs['dataset_conf'])
    test_conf['filter_conf']['max_length'] = 102400
    test_conf['filter_conf']['min_length'] = 0
    test_conf['filter_conf']['token_max_length'] = 102400
    test_conf['filter_conf']['token_min_length'] = 0
    test_conf['filter_conf']['max_output_input_ratio'] = 102400
    test_conf['filter_conf']['min_output_input_ratio'] = 0
    test_conf['speed_perturb'] = False
    test_conf['spec_aug'] = False
    test_conf['spec_sub'] = False
    test_conf['spec_trim'] = False
    test_conf['shuffle'] = False
    test_conf['sort'] = False
    if 'fbank_conf' in test_conf:
        test_conf['fbank_conf']['dither'] = 0.0
    elif 'mfcc_conf' in test_conf:
        test_conf['mfcc_conf']['dither'] = 0.0
    test_conf['batch_conf']['batch_type'] = "static"
    test_conf['batch_conf']['batch_size'] = args.batch_size

    tokenizer = init_tokenizer(configs)
    test_dataset = Dataset(args.data_type,
                           args.test_data,
                           tokenizer,
                           test_conf,
                           partition=False)

    # batch_size=None: the Dataset already yields fully formed batches.
    test_data_loader = DataLoader(test_dataset, batch_size=None, num_workers=0)
    for id, batch in enumerate(test_data_loader):
        for k, v in batch.items():
            if k == 'keys':
                # print(k, v)
                pass
            else:
                print(k, v.shape)
        print('------------------------------------')
        if id >= 10:
            break
    # return
    args.jit = False
    model, configs = init_model(args, configs)

    use_cuda = args.gpu >= 0 and torch.cuda.is_available()
    device = torch.device('cuda' if use_cuda else 'cpu')
    model = model.to(device)
    model.eval()
    print(model)

    if "salmonn_decode" in args.modes:
        logging.info('decode mode: salmonn_decode')
        result_file = os.path.join(args.result_dir, 'text_hyp')
        with torch.no_grad(), open(result_file, 'w') as fout:
            # NOTE(review): here batches are unpacked as tuples, while the
            # shape-printing loop above treats them as dicts — confirm which
            # format this Dataset actually yields.
            for batch_idx, batch in enumerate(test_data_loader):
                sorted_keys, padded_feats, padding_labels, feats_lengths, label_lengths = batch
                padded_feats = padded_feats.to(device)
                feats_lengths = feats_lengths.to(device)
                padding_labels = padding_labels.to(device)
                label_lengths = label_lengths.to(device)
                prompt = 'Describe the speech.'
                # NOTE(review): args.modes (plural) is checked above but
                # args.mode (singular) here — confirm both exist on args.
                if args.mode != 'salmonn_decode':
                    continue
                try:
                    hyp = model.generate(padded_feats, feats_lengths, prompt)
                except RuntimeError as e:
                    logging.info(f'如下音频出现NaN：{sorted_keys}，error: {e}')
                    # Skip this batch: 'hyp' would otherwise be unbound on the
                    # first failure or stale from a previous batch.
                    continue

                for i, key in enumerate(sorted_keys):
                    logging.info('{} {}'.format(key, hyp[0]))
                    fout.write('{} {}\n'.format(key, hyp[0]))

from wenet.whisper.convert_whisper_to_wenet_config_and_ckpt import main as convert_whisper_to_wenet
# from gxl_ai_utils.utils import utils_file
def do_test_whisper():
    """Feed random waveforms through the S3PRL frontend and print output shapes."""
    configs = load_dict_from_yaml('../conf/train_salmonn_v9.yaml')
    # encoder = OpenAIWhisperEncoder(**configs['encoder_conf'])
    encoder = S3prlFrontend(**configs['encoder_conf'])
    print(encoder)

    # Batch of 4 random waveforms with distinct valid lengths.
    dummy_wav = torch.randn(4, 10000)
    dummy_lengths = torch.tensor([10000, 9999, 9323, 8322])
    speech_embeds, speech_lens = encoder(dummy_wav, dummy_lengths)
    print(speech_embeds.shape)
    print(speech_lens.shape)

def do_handle_data():
    """Gather shard .tar lists from several corpora (AISHELL-1/2 weighted x3),
    shuffle them, and write the combined list to disk."""
    from gxl_ai_utils.utils import utils_file

    wenetspeech_dir = "/home/local_data/hwang/huawei_cn_en/cn2/wenetspeech"
    ws_list = glob.glob(f'{wenetspeech_dir}/*.tar')
    # Loaded but never used below; the call is kept for parity with the
    # original (it reads from disk).
    all_data_list = utils_file.load_list_file_clean("/home/work_nfs7/xlgeng/new_workspace/wenet_gxl_salmonn/examples/aishell/salmonn/data_list/data_2.list.all.shuf")

    # Repeat AISHELL-1/2 three times to up-weight them in the mixture.
    shard_groups = [
        ws_list,
        glob.glob("/home/local_data/hwang/huawei_cn_en/cn/aishell1/*.tar") * 3,
        glob.glob("/home/local_data/hwang/huawei_cn_en/cn/aishell2/*.tar") * 3,
        glob.glob("/home/local_data/xlgeng/data/shards/aishell4/*.tar"),
        glob.glob("/home/local_data/xlgeng/data/shards/Train_Ali_far/*.tar"),
        glob.glob("/home/local_data/xlgeng/data/shards/Train_Ali_near/*.tar"),
    ]
    all_list = [shard for group in shard_groups for shard in group]
    random.shuffle(all_list)
    utils_file.write_list_to_file(all_list, '/home/work_nfs7/xlgeng/new_workspace/wenet_gxl_salmonn/examples/aishell/salmonn/data_list/gxl_all.list')


if __name__ == "__main__":
    # Script entry point: only the whisper/S3PRL frontend smoke test runs here;
    # the other do_* functions are invoked by editing this line.
    do_test_whisper()
