from __future__ import print_function

import argparse
import copy
import logging
import os
import sys
import yaml

import torch
import torchaudio
import torchaudio.compliance.kaldi as kaldi
from wenet.utils.checkpoint import load_checkpoint
from wenet.utils.file_utils import read_symbol_table, read_non_lang_symbols
from wenet.utils.init_model import init_model

def init_model_(model_dir):
    """Load a WeNet ASR model from *model_dir*.

    Expects two files inside *model_dir*:
      - ``final.pt``: the checkpoint to load.
      - ``train.yaml``: the training config used to build the model.

    Args:
        model_dir: path to the directory holding the model files.

    Returns:
        Tuple ``(model, device)`` — the model in its loaded state, already
        moved to CUDA when available, and the ``torch.device`` it lives on.
    """
    # Build both paths portably; the old code overwrote this with a
    # hard-coded "/"-joined string, which broke on Windows.
    model_pt = os.path.join(model_dir, 'final.pt')
    config_yaml = os.path.join(model_dir, 'train.yaml')
    with open(config_yaml, 'r') as fin:
        configs = yaml.load(fin, Loader=yaml.FullLoader)

    # Init asr model from configs
    print("init model ...")
    model = init_model(configs)
    load_checkpoint(model, model_pt)

    # Prefer GPU when one is present; inference runs on this device.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = model.to(device)
    return model, device

def infer(model_dir, model, device, input_wav: str = '', mode: str = 'ctc_greedy_search'):
    """Run ASR inference on a single wav file and return the transcript.

    Args:
        model_dir: directory containing ``units.txt`` (token symbol table).
        model: ASR model as returned by ``init_model_``.
        device: ``torch.device`` the model lives on.
        input_wav: path to the input wav; falls back to ``'test.wav'`` when empty.
        mode: decoding mode — one of ``'ctc_greedy_search'``,
            ``'ctc_prefix_beam_search'``, ``'attention_rescoring'``.

    Returns:
        The decoded tokens joined by spaces, or ``""`` when the audio is
        too short to decode.

    Raises:
        ValueError: if *mode* is not one of the supported decoding modes.
    """
    tokens_dict = os.path.join(model_dir, 'units.txt')
    symbol_table = read_symbol_table(tokens_dict)

    # Invert the token->id table; the last id is treated as the <eos> marker.
    char_dict = {v: k for k, v in symbol_table.items()}
    eos = len(char_dict) - 1

    # Load audio (default test file when no path is given).
    wav_path = input_wav if input_wav != '' else 'test.wav'
    waveform, sample_rate = torchaudio.load(wav_path)

    # ~100 frames per second at a 10 ms shift; reject clips shorter than
    # 10 frames — too little signal for the frontend/decoder.
    num_frames = waveform.size(1) / sample_rate * 100
    if num_frames < 10:
        print("audio is too short ...")
        return ""

    # Resample to the 16 kHz rate the model expects.
    if sample_rate != 16000:
        waveform = torchaudio.transforms.Resample(
            orig_freq=sample_rate, new_freq=16000)(waveform)
        sample_rate = 16000

    # Kaldi fbank expects 16-bit-integer-range samples, hence the scaling.
    waveform = waveform * (1 << 15)
    mat = kaldi.fbank(waveform,
                      num_mel_bins=80,
                      frame_length=25,
                      frame_shift=10,
                      dither=0.0,
                      energy_floor=0.0,
                      sample_frequency=sample_rate)
    feats = torch.unsqueeze(mat, dim=0)
    feats_lengths = torch.unsqueeze(
        torch.tensor(feats.size(1), dtype=torch.int32), dim=0)
    feats = feats.to(device)
    feats_lengths = feats_lengths.to(device)

    # Decoding parameters (full-utterance, non-streaming decode).
    decoding_chunk_size = -1
    num_decoding_left_chunks = -1
    simulate_streaming = False
    beam_size = 10
    ctc_weight = 0.3
    reverse_weight = 0.5

    print('start inference ...')
    model.eval()
    with torch.no_grad():
        if mode == 'ctc_greedy_search':
            # Greedy search returns a batch of hypotheses directly.
            hyps, _ = model.ctc_greedy_search(
                feats,
                feats_lengths,
                decoding_chunk_size=decoding_chunk_size,
                num_decoding_left_chunks=num_decoding_left_chunks,
                simulate_streaming=simulate_streaming)
        elif mode == 'ctc_prefix_beam_search':
            # Beam search handles one utterance at a time.
            assert feats.size(0) == 1
            hyp, _ = model.ctc_prefix_beam_search(
                feats,
                feats_lengths,
                beam_size,
                decoding_chunk_size=decoding_chunk_size,
                num_decoding_left_chunks=num_decoding_left_chunks,
                simulate_streaming=simulate_streaming)
            hyps = [hyp]
        elif mode == 'attention_rescoring':
            assert feats.size(0) == 1
            hyp, _ = model.attention_rescoring(
                feats,
                feats_lengths,
                beam_size,
                decoding_chunk_size=decoding_chunk_size,
                num_decoding_left_chunks=num_decoding_left_chunks,
                ctc_weight=ctc_weight,
                simulate_streaming=simulate_streaming,
                reverse_weight=reverse_weight)
            hyps = [hyp]
        else:
            # Previously an unknown mode fell through and crashed later with
            # NameError on `hyps`; fail fast with a clear message instead.
            raise ValueError('unsupported decoding mode: %s' % mode)

    # Map token ids back to symbols, stopping at <eos>.
    result = []
    for w in hyps[0]:
        if w == eos:
            break
        result.append(char_dict[w])

    return " ".join(result)

if __name__ == '__main__':
    # Demo entry point: load the bundled model and transcribe one wav file.
    MODEL_DIR = "model_files"
    WAV_PATH = "Bnjx5086.wav"
    asr_model, run_device = init_model_(MODEL_DIR)
    transcript = infer(MODEL_DIR, asr_model, run_device, WAV_PATH)
    print(transcript)
