import torch

import asr_model_module
import data_handler
import config

if __name__ == '__main__':
    # Offline ASR inference: load a trained AsrModel checkpoint, extract
    # fbank features from one wav file, and print per-frame argmax token ids.
    tokenizer = data_handler.PingyinVocabTokenizer()
    tokenizer.build_vocab()
    model = asr_model_module.AsrModel(fbank_dim=100, vocab_size=len(tokenizer), hidden_dim=1024, dropout=0.0897)
    # Raw strings: the original non-raw literals relied on invalid escape
    # sequences (\c, \d, \A) being passed through; the runtime values here
    # are identical, without the Python 3.12+ SyntaxWarning.
    path = r"F:\code\python\deeplearning\pythonProject\ai\output\chatbot_pytorch\gxl_model/"
    # NOTE(review): consider weights_only=True (torch >= 1.13) since this is a
    # plain state_dict — avoids unpickling arbitrary objects.
    param_to_load = torch.load(path + "model_params_2600_asr.pth", map_location=torch.device('cpu'))
    # Strip DataParallel's "module." prefix so keys match the bare model.
    param_to_load = {k.replace("module.", ""): v for k, v in param_to_load.items()}
    model.load_state_dict(param_to_load)
    # Fix: switch to eval mode — the model was built with dropout=0.0897,
    # which would otherwise stay active and randomize inference output.
    model.eval()
    file_path = r"F:\code\python\deeplearning\pythonProject\ai\data\asr_pytorch\data_thchs30\data\A2_0.wav"
    fbank = data_handler.compute_fbank(file_path)  # (seq, fbank)
    fbank = torch.tensor(fbank).unsqueeze(0)  # add batch dim -> (1, seq, fbank)
    len_seq = torch.tensor([fbank.shape[1]])  # sequence length, shape (1,)
    # Fix: no autograd graph needed for inference.
    with torch.no_grad():
        # presumably output is (1, seq, vocab) — TODO confirm against AsrModel
        output = model(fbank, len_seq)
    print(output.shape)
    # Greedy decoding: per-frame argmax over the vocabulary dimension.
    predict, indices = torch.max(output, dim=2)
    print(indices[0])
    # print(tokenizer.decode(indices[0]))

