import sys

import torch
import torchaudio


sys.path.insert(0,'../../../')
from gxl_ai_utils.utils import utils_file
from wenet.utils.init_tokenizer import init_tokenizer
from gxl_ai_utils.config.gxl_config import GxlNode
from wenet.utils.init_model import init_model
import logging
import librosa
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s %(levelname)s %(message)s')

# Paths to the training config and the checkpoint to load.
# NOTE(review): the original code assigned config_path twice in a row; the
# first value (the epoch_2.yaml under /home/work_nfs15) was dead code that
# was immediately overwritten, so only the effective value is kept here.
config_path = "../conf/train_ASLP_ASRLLM_downsample4.yaml"
checkpoint_path = "/home/work_nfs15/asr_data/ckpt/asrllm/35000hour_model/epoch_2.pt"

args = GxlNode({
    "checkpoint": checkpoint_path,
})
configs = utils_file.load_dict_from_yaml(config_path)
model, configs = init_model(args, configs)

# Pin the model to one GPU. Any tensors fed to model.generate() must be
# placed on this same device (see do_decode below).
gpu_id = 4
device = torch.device(f'cuda:{gpu_id}')
model = model.to(device)

tokenizer = init_tokenizer(configs)
print(model)

# Default decoding inputs. The prompt is Chinese for: "Perform the speech
# recognition task; convert the audio to text."
input_wav_path = "/home/work_nfs15/asr_data/data/asr_test_sets/speechio_12/wav/amM5MpA7URM_0033.wav"
input_prompt = "执行语音识别任务，将音频转换为文字。"

def do_decode(input_wav_path: str, input_prompt: str) -> str:
    """Run one ASR-LLM inference pass and return the recognized text.

    Args:
        input_wav_path: Path to an audio file readable by torchaudio.
        input_prompt: Instruction text passed to the LLM decoder.

    Returns:
        The first hypothesis string produced by ``model.generate``.
    """
    waveform, sample_rate = torchaudio.load(input_wav_path)
    # Down-mix multi-channel audio to mono, keeping the channel dimension.
    waveform = waveform.mean(dim=0, keepdim=True)
    # Bug fix: the original discarded sample_rate entirely. The front-end
    # presumably expects 16 kHz audio (standard for ASR models) — TODO
    # confirm against the training config.
    target_sr = 16000
    if sample_rate != target_sr:
        waveform = torchaudio.functional.resample(waveform, sample_rate, target_sr)
    # Bug fix: the model was moved to `device` (cuda) at module level, but
    # the inputs were left on CPU; put them on the model's device.
    waveform = waveform.to(device)
    waveform_lens = torch.tensor([waveform.shape[1]], device=device)
    print(f'waveform shape: {waveform.shape}')
    print(f'waveform_lens : {waveform_lens}')
    res_text = model.generate(wavs=waveform, wavs_len=waveform_lens, prompt=input_prompt)[0]
    return res_text

if __name__ == "__main__":
    """"""
    res = do_decode(input_wav_path, input_prompt)
    print(f'识别结果：{res}')