import io
import sys
import os


sys.path.insert(0,'../')

from wenet.utils.init_tokenizer import init_tokenizer
import torch
import torchaudio
from gxl_ai_utils.utils import utils_file
from gxl_ai_utils.config.gxl_config import GxlNode
from torchaudio.compliance import kaldi
from wenet.utils.init_model import init_model
def load_models(config_path, global_cmvn_path, dict_path,
                checkpoint_path="/home/work_nfs11/code/xlgeng/wenet_mine/examples/aishell/s0/exp/avg_3.pt",
                device=0):
    """Build an eval-mode wenet ASR model and its tokenizer from a YAML config.

    Args:
        config_path: path to the training YAML configuration.
        global_cmvn_path: path to the global CMVN statistics file.
        dict_path: path to the symbol table (char dict) used by the tokenizer.
        checkpoint_path: model checkpoint to load. Default keeps the
            previously hard-coded path, so existing callers are unaffected.
        device: torch device index or string the model is moved to
            (default 0, as before).

    Returns:
        (model, tokenizer): the model on `device` in eval mode, and the
        tokenizer built from the config.
    """
    configs = utils_file.load_dict_from_yaml(config_path)
    configs['cmvn_conf']['cmvn_file'] = global_cmvn_path
    configs["tokenizer_conf"]['symbol_table_path'] = dict_path
    args = GxlNode({
        'checkpoint': checkpoint_path,
        "device": device,
    })
    tokenizer = init_tokenizer(configs)
    # input_dim must match the fbank feature dimension used at training time.
    configs['input_dim'] = configs['dataset_conf']['fbank_conf']['num_mel_bins']
    configs['output_dim'] = tokenizer.vocab_size()
    model, configs = init_model(args, configs)

    model = model.to(torch.device(args.device))
    model.eval()  # inference only: disable dropout / batchnorm updates
    return model, tokenizer


# Module-level model load: these cluster-local paths point at an AISHELL
# conformer checkpoint's dict and global CMVN stats. The resulting `model`
# and `tokenizer` globals are required by `inference()` below, so this runs
# (with its heavy I/O) at import time.
dict_path = "/home/node54_tmpdata/xlgeng/ckpt/format_model/comformer_decoder_aishell/data/dict/lang_char.txt"
cmvn_path = "/home/node54_tmpdata/xlgeng/ckpt/format_model/comformer_decoder_aishell/data/global_cmvn"
model, tokenizer = load_models('conf/train_conformer.yaml', cmvn_path, dict_path)

def inference(audio_path, device="cuda:0"):
    """Decode one audio file with attention-rescoring and return the text.

    Uses the module-level `model` and `tokenizer` globals loaded at import
    time, so the features must live on the same device as the model.

    Args:
        audio_path: path to the input audio file.
        device: device the features are moved to. Default keeps the
            previously hard-coded "cuda:0" for backward compatibility.

    Returns:
        The decoded transcript as a string.
    """
    mat, mat_length = load_wav_get_feat(audio_path)
    mat = mat.to(device)
    mat_length = mat_length.to(device)
    # Pure inference: no_grad avoids building an autograd graph, which
    # would otherwise waste GPU memory during decoding.
    with torch.no_grad():
        results = model.decode(
            ['attention_rescoring'],
            mat,
            mat_length,
            beam_size=35,
            decoding_chunk_size=-1,       # -1: full-utterance (non-streaming)
            num_decoding_left_chunks=-1,
            ctc_weight=0.3,
            simulate_streaming=False,
            reverse_weight=0.2,
            context_graph=None,
            blank_id=0,
            blank_penalty=0,
            length_penalty=0,
            infos={})
    decode_result_for_mode = results['attention_rescoring']
    tokens = decode_result_for_mode[0].tokens
    return tokenizer.detokenize(tokens)[0]


def load_wav_get_feat(audio_path):
    """Load an audio file and compute 80-dim kaldi fbank features.

    Args:
        audio_path: path to the input audio file.

    Returns:
        (mat, mat_length): `mat` is a (1, num_frames, 80) float tensor
        (batch dim added for the model); `mat_length` is a 1-element
        tensor holding num_frames.
    """
    waveform, sample_rate = torchaudio.load(audio_path)
    resample_rate = 16000
    # Only resample when rates differ; torchaudio's resample is an identity
    # at equal rates, so this guard just skips pointless transform setup.
    if sample_rate != resample_rate:
        waveform = torchaudio.transforms.Resample(
            orig_freq=sample_rate, new_freq=resample_rate)(waveform)
    # Downmix multi-channel audio to mono by averaging channels.
    if waveform.shape[0] != 1:
        waveform = waveform.mean(dim=0, keepdim=True)
    # kaldi.fbank expects 16-bit-PCM-scale sample values.
    waveform = waveform * (1 << 15)
    mat = kaldi.fbank(waveform,
                      num_mel_bins=80,
                      frame_length=25,
                      frame_shift=10,
                      dither=0.0,    # deterministic features for inference
                      energy_floor=0.0,
                      sample_frequency=resample_rate)
    mat = mat.unsqueeze(0)  # add batch dimension
    mat_length = torch.tensor([mat.shape[1]])
    return mat, mat_length

if __name__ == '__main__':
    # Smoke test for the pipeline. The original block re-loaded the model
    # (already loaded at module level with identical arguments) and
    # duplicated the body of `inference()` verbatim; calling the function
    # removes that duplication.
    res_str = inference("audio_files/1728893165.wav")
    print(res_str)