from modelscope import pipeline, Tasks
from modelscope.utils.logger import get_logger

# Module-level logger for this demo script (ModelScope's shared logger).
logger = get_logger()

# Build a cascaded Mandarin ASR pipeline from three ModelScope models:
#   - Paraformer-large : speech recognition (zh-CN, 16 kHz)
#   - FSMN VAD         : voice activity detection -> start/end times of speech
#   - CT-Transformer   : punctuation restoration on the recognized text
# Revisions are pinned to "v2.0.4" for reproducibility; ngpu=1 runs on one GPU.
inference_pipeline = pipeline(
    task=Tasks.auto_speech_recognition,
    model='damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch',  # speech recognition
    model_revision="v2.0.4",
    vad_model='damo/speech_fsmn_vad_zh-cn-16k-common-pytorch',  # endpoint detection -> speech segment boundaries
    vad_model_revision="v2.0.4",
    punc_model='damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch',  # punctuation restoration
    punc_model_revision="v2.0.4",
    ngpu=1,
)

# Run recognition on a hosted sample WAV. The expected transcript for this
# sample is: 欢迎大家来体验达摩院推出的语音识别模型。
rec_result = inference_pipeline(
    input='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/vad_example.wav')
# Lazy %-style args: the message is only rendered if INFO logging is enabled.
logger.info("asr_vad_punc inference result: %s", rec_result)