import argparse

import torch
import torchaudio
import yaml
from torchaudio.compliance import kaldi

from wenet.utils.init_model import init_model

# Experiment directory holding the training config and model checkpoints.
exp_path = "/home/work_nfs8/xlgeng/snsun/code/wenet_firered/examples/aishell/firered/exp/firered_aed_xs"
conf_path = f'{exp_path}/train.yaml'
ckpt_path = f'{exp_path}/epoch_0.pt'
# ckpt_path = f'{exp_path}/step_329999.pt'
# init_model only needs the `checkpoint` attribute here, so a minimal
# argparse.Namespace stands in for real command-line arguments.
args = argparse.Namespace(
    checkpoint=ckpt_path,
)
device_str = "cuda:0"
# Load the YAML training configuration.
# NOTE(review): yaml.FullLoader can construct arbitrary Python objects;
# acceptable for a trusted local config, but prefer yaml.safe_load if the
# file could ever come from an untrusted source.
with open(conf_path, 'r', encoding='utf-8') as fin:
    configs = yaml.load(fin, Loader=yaml.FullLoader)

model, configs = init_model(args, configs)

device = torch.device(device_str)
model = model.to(device)
# Switch to eval mode (disables dropout etc.) for inference.
model.eval()

# Absolute paths of the wav files to run caption inference on (one batch).
input_wav_path_list = [
    "/home/work_nfs8/asr_data/data/test_sets_format_3000/caption_aslp_record/wav/TEST_NET_Y0000000000_-KTKHdZ2fb8_S00156.wav",
    "/home/work_nfs8/asr_data/data/test_sets_format_3000/caption_aslp_record/wav/TEST_NET_Y0000000000_-KTKHdZ2fb8_S00155.wav",
    "/home/work_nfs8/asr_data/data/test_sets_format_3000/caption_aslp_record/wav/TEST_NET_Y0000000000_-KTKHdZ2fb8_S00152.wav"
]

# Mapping from audio-event caption tags to integer class ids.
# "<laugh>" and "<laughing>" deliberately share class id 0.
big_caption_dict_encode = {
    "<laugh>": 0,
    "<laughing>": 0,
    "<cough>": 1,
    '<cry>': 2,
    '<screaming>': 3,
    '<sigh>': 4,
    '<throat clearing>': 5,
    '<sneeze>': 6,
    '<other>': 7,
}
# Inverse mapping (class id -> tag). For duplicate ids the later tag
# wins, so id 0 decodes to "<laughing>".
big_caption_dict_decode = {}
for tag, class_id in big_caption_dict_encode.items():
    big_caption_dict_decode[class_id] = tag


def compute_fbank(wav_path,
                  num_mel_bins=80,
                  frame_length=25,
                  frame_shift=10,
                  dither=0.1):
    """Compute Kaldi-style log-mel filterbank features for one wav file.

    Args:
        wav_path: path to the input audio file.
        num_mel_bins: number of mel filterbank bins.
        frame_length: frame length in milliseconds.
        frame_shift: frame shift in milliseconds.
        dither: dithering constant. NOTE(review): a non-zero dither adds
            random noise, making features non-deterministic; consider
            dither=0.0 for evaluation — confirm against training config.

    Returns:
        A (num_frames, num_mel_bins) feature tensor.
    """
    audio, sample_rate = torchaudio.load(wav_path)
    # kaldi.fbank expects samples in 16-bit integer range, hence the
    # 2**15 scaling of the [-1, 1] float waveform.
    audio = audio * (1 << 15)
    return kaldi.fbank(audio,
                       num_mel_bins=num_mel_bins,
                       frame_length=frame_length,
                       frame_shift=frame_shift,
                       dither=dither,
                       energy_floor=0.0,
                       sample_frequency=sample_rate)


def wav2feats(wav_path_list):
    """Turn a list of wav paths into a zero-padded batch of fbank features.

    Args:
        wav_path_list: list of wav file paths.

    Returns:
        feats_pad: (B, T_max, D) batch, zero-padded along the time axis.
        feats_lengths: (B,) LongTensor of per-utterance frame counts.
    """
    features = [compute_fbank(path) for path in wav_path_list]
    lengths = torch.tensor([feat.size(0) for feat in features],
                           dtype=torch.long)
    padded = torch.nn.utils.rnn.pad_sequence(features,
                                             batch_first=True,
                                             padding_value=0.0)
    return padded, lengths

# Build the feature batch and move it onto the inference device.
feats, feats_lengths = wav2feats(input_wav_path_list)
feats = feats.to(device)
feats_lengths = feats_lengths.to(device)

# Run caption decoding; each element of `results` carries the predicted
# class id under the "hyp" key.
results = model.decode4caption(
    "caption",
    feats,
    feats_lengths,
)
# Map predicted class ids back to caption tags. The loop variable is
# named hyp_id (not `id`) to avoid shadowing the builtin.
hyp_ids = [result["hyp"] for result in results]
strs = [big_caption_dict_decode[hyp_id] for hyp_id in hyp_ids]
print(strs)