from __future__ import print_function

import argparse
import copy
import logging
import os
import sys
import yaml

import torch
import torchaudio
import torchaudio.compliance.kaldi as kaldi
from wenet.utils.checkpoint import load_checkpoint
from wenet.utils.file_utils import read_symbol_table, read_non_lang_symbols
from wenet.utils.init_model import init_model


def init_model_(model_dir):
    """Load a pretrained WeNet ASR model from *model_dir* and move it to
    the best available device.

    Expects two files inside *model_dir*:
      - ``final.pt``   : the model checkpoint
      - ``train.yaml`` : the training/config yaml used to build the model

    Returns:
        (model, device): the loaded model (already on *device*) and the
        torch device it was placed on.
    """
    checkpoint_path = os.path.join(model_dir, 'final.pt')
    config_path = os.path.join(model_dir, 'train.yaml')

    with open(config_path, 'r') as cfg_file:
        configs = yaml.load(cfg_file, Loader=yaml.FullLoader)

    # Build the model graph from the config, then restore the weights.
    print("init model ...")
    model = init_model(configs)
    load_checkpoint(model, checkpoint_path)

    # Prefer GPU when torch can see one; otherwise fall back to CPU.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    return model.to(device), device


def infer(model_dir, model, device, input_wav: str = '', mode: str = 'ctc_greedy_search'):
    """Run ASR inference on a single wav file and return the decoded text.

    Args:
        model_dir: directory containing ``units.txt`` (token symbol table).
        model: a WeNet ASR model as returned by ``init_model_``.
        device: the torch device the model lives on.
        input_wav: path to the wav file; falls back to ``test.wav`` when empty.
        mode: decoding strategy — one of ``'ctc_greedy_search'``,
            ``'ctc_prefix_beam_search'`` or ``'attention_rescoring'``.

    Returns:
        The decoded transcript with a trailing Chinese full stop, or ``""``
        when the audio is too short to decode.

    Raises:
        ValueError: if *mode* is not one of the supported strategies.
    """
    tokens_dict = os.path.join(model_dir, 'units.txt')
    symbol_table = read_symbol_table(tokens_dict)

    # Invert the symbol table: token id -> token string.
    char_dict = {v: k for k, v in symbol_table.items()}
    eos = len(char_dict) - 1
    # Load the audio; use a default test file when no path is given.
    if input_wav != '':
        waveform, sample_rate = torchaudio.load(input_wav)
    else:
        waveform, sample_rate = torchaudio.load('test.wav')

    # waveform.size(1) is the number of samples; dividing by sample_rate
    # gives seconds, and * 100 converts that to a count of 10 ms frames
    # (matching the fbank frame shift below). Require at least 10 frames.
    num_frames = waveform.size(1) / sample_rate * 100
    if num_frames < 10:
        print("audio is too short ...")
        return ""

    # Resample to the 16 kHz rate the model was trained on.
    if sample_rate != 16000:
        waveform = torchaudio.transforms.Resample(
            orig_freq=sample_rate, new_freq=16000)(waveform)
        sample_rate = 16000
    # Scale to the 16-bit signed-integer range expected by kaldi-style fbank.
    waveform = waveform * (1 << 15)
    mat = kaldi.fbank(waveform,
                      num_mel_bins=80,               # number of mel filter banks
                      frame_length=25,               # frame length in ms
                      frame_shift=10,                # frame shift in ms
                      dither=0.0,                    # no dithering noise
                      energy_floor=0.0,              # energy floor
                      sample_frequency=sample_rate)  # audio sample rate

    feats = torch.unsqueeze(mat, dim=0)  # add a batch dimension: (1, T, 80)
    feats_lengths = torch.unsqueeze(torch.tensor(feats.size(1), dtype=torch.int32), dim=0)
    feats = feats.to(device)
    feats_lengths = feats_lengths.to(device)

    # Decoding hyper-parameters.
    decoding_chunk_size = -1         # -1: use the full utterance (non-streaming)
    num_decoding_left_chunks = -1    # -1: use all left-context chunks
    simulate_streaming = False       # do not simulate chunk-by-chunk streaming
    beam_size = 10                   # beam width for the beam-search modes
    ctc_weight = 0.3                 # CTC weight for attention rescoring
    reverse_weight = 0.5             # right-to-left decoder weight

    print('start inference ...')
    model.eval()
    with torch.no_grad():            # inference only — no gradients needed
        if mode == 'ctc_greedy_search':
            hyps, _ = model.ctc_greedy_search(
                feats,
                feats_lengths,
                decoding_chunk_size=decoding_chunk_size,
                num_decoding_left_chunks=num_decoding_left_chunks,
                simulate_streaming=simulate_streaming)
        elif mode == 'ctc_prefix_beam_search':
            assert (feats.size(0) == 1)
            hyp, _ = model.ctc_prefix_beam_search(
                feats,
                feats_lengths,
                beam_size,
                decoding_chunk_size=decoding_chunk_size,
                num_decoding_left_chunks=num_decoding_left_chunks,
                simulate_streaming=simulate_streaming)
            hyps = [hyp]
        elif mode == 'attention_rescoring':
            assert (feats.size(0) == 1)
            hyp, _ = model.attention_rescoring(
                feats,
                feats_lengths,
                beam_size,
                decoding_chunk_size=decoding_chunk_size,
                num_decoding_left_chunks=num_decoding_left_chunks,
                ctc_weight=ctc_weight,
                simulate_streaming=simulate_streaming,
                reverse_weight=reverse_weight)
            hyps = [hyp]
        else:
            # Previously an unknown mode left `hyps` unbound and the function
            # died later with a confusing NameError; fail fast instead.
            raise ValueError('unsupported decoding mode: %s' % mode)

    # Map token ids back to characters, stopping at the eos id.
    result = []
    for w in hyps[0]:
        if w == eos:
            break
        result.append(char_dict[w])

    return "".join(result) + "。"


if __name__ == '__main__':
    # Paths to the pretrained model directory and the sample recording.
    asr_model_dir = "model_files"
    wav_path = "record_wav/23_12_14.wav"

    asr_model, run_device = init_model_(asr_model_dir)
    transcript = infer(asr_model_dir, asr_model, run_device, wav_path)