# Speech-to-text transcription (SenseVoiceSmall via FunASR)

# from modelscope.pipelines import pipeline
# from modelscope.utils.constant import Tasks

# inference_pipeline = pipeline(
#     task=Tasks.auto_speech_recognition,
#     model=r'Model/SenseVoiceSmall',
#     model_revision="master",
#     device="cuda:0",
#     use_itn=True,
#     disable_update=True)


# rec_result = inference_pipeline(r"D:\Project\Chat_Project\output_5.wav")
# print(rec_result)



from funasr import AutoModel
from funasr.utils.postprocess_utils import rich_transcription_postprocess

# Local directory holding the SenseVoiceSmall model weights.
model_dir = r"Model/SenseVoiceSmall"

# Build the ASR model with an FSMN-VAD front-end so long audio is split
# into segments (<= 30 s each) before recognition.
model = AutoModel(
    model=model_dir,  # FIX: was a hard-coded D:\Downloads path that ignored model_dir
    trust_remote_code=True,
    remote_code="./model.py",  # custom model code shipped alongside the weights
    vad_model="fsmn-vad",
    vad_kwargs={"max_single_segment_time": 30000},  # VAD segment cap, in ms (30 s)
    device="cuda:0",
    use_itn=True,  # inverse text normalization (digits, punctuation)
    disable_update=True,  # skip FunASR's online model-update check
)

# Transcribe a single WAV file in Chinese.
res = model.generate(
    input=r"D:\Project\Chat_Project\output_5.wav",
    cache={},
    language="zh",  # "zh", "en", "yue", "ja", "ko", "nospeech"
    use_itn=True,
    batch_size_s=60,  # dynamic batching measured in total audio seconds
    merge_vad=True,  # merge adjacent short VAD segments before decoding
    merge_length_s=15,  # merged segments capped at 15 s
)

# Post-process the raw output (presumably strips SenseVoice's special
# emotion/event tags and tidies the transcript) and print it.
text = rich_transcription_postprocess(res[0]["text"])
print(text)