import soundfile
from funasr import AutoModel
from modelscope import pipeline, Tasks
from modelscope.utils.logger import get_logger

logger = get_logger()
# Offline ASR pipeline: Paraformer-large recognizer with FSMN VAD,
# CT-Transformer punctuation and CAM++ speaker models (16 kHz Mandarin).
inference_pipeline = AutoModel(
    model="damo/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch",
    model_revision="v2.0.4",
    vad_model="damo/speech_fsmn_vad_zh-cn-16k-common-pytorch",
    vad_model_revision="v2.0.4",
    punc_model="damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch",
    punc_model_revision="v2.0.4",
    spk_model="damo/speech_campplus_sv_zh-cn_16k-common",
    spk_model_revision="v2.0.2",
    ngpu=1,
)
# res = inference_pipeline.generate(
#         input="examples/vad_example.wav",
#         batch_size_s=300, batch_size_threshold_s=60)
# print(res)

if __name__ == '__main__':

    encoder_chunk_look_back = 0  # number of chunks to lookback for encoder self-attention
    decoder_chunk_look_back = 0  # number of encoder chunks to lookback for decoder cross-attention
    # 假设音频文件路径为 "path/to/audio.wav"
    wav_file = "asr_vad_punc_example.wav"
    speech, sample_rate = soundfile.read(wav_file)

    chunk_size = 200  # ms
    chunk_stride = int(chunk_size * sample_rate / 1000)

    cache = {}
    total_chunk_num = int(len((speech) - 1) / chunk_stride + 1)
    for i in range(total_chunk_num):
        speech_chunk = speech[i * chunk_stride:(i + 1) * chunk_stride]
        is_final = i == total_chunk_num - 1
        res = inference_pipeline.generate(input=speech_chunk,
                batch_size_s=300, batch_size_threshold_s=60
                )
        print(res)

