from funasr import AutoModel
from funasr.utils.postprocess_utils import rich_transcription_postprocess
import re
import string

# Local directory holding the downloaded FunASR model checkpoints.
model_dir = "./model/funasr"

# Paraformer-large ASR pipeline with separate VAD (voice activity detection)
# and punctuation-restoration models, all loaded from local checkpoints so no
# hub download happens at runtime.
model = AutoModel(
    model=f"{model_dir}/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch",
    vad_model=f"{model_dir}/speech_fsmn_vad_zh-cn-16k-common-pytorch",
    punc_model=f"{model_dir}/punc_ct-transformer_zh-cn-common-vocab272727-pytorch",
    # Cap each VAD segment at 30 s so long audio is chunked for the recognizer.
    vad_kwargs={"max_single_segment_time": 30000},
    device="cuda:0",  # NOTE(review): hard-coded GPU — fails on CPU-only hosts; confirm deployment target.
)

def funAsrExec(f):
    """Transcribe an audio file and return per-sentence segments.

    Parameters
    ----------
    f : str
        Path to the audio file to transcribe (e.g. "./demo.wav").

    Returns
    -------
    list[dict]
        One dict per recognized sentence with keys "start", "end"
        (timestamps as reported by the model — presumably milliseconds,
        TODO confirm against FunASR docs) and "text" (the sentence text).
        Returns an empty list when the model reports no sentences.
    """
    res = model.generate(
        input=f,
        cache={},
        language="auto",  # "zn", "en", "yue", "ja", "ko", "nospeech"
        use_itn=True,  # inverse text normalization (digits, punctuation)
        sentence_timestamp=True,  # required for the per-sentence timing read below
        batch_size_s=60,
        merge_vad=True,
        merge_length_s=15,
    )
    # Guard against empty / no-speech results instead of raising
    # IndexError/KeyError on res[0]["sentence_info"].
    if not res or "sentence_info" not in res[0]:
        return []
    return [
        {"start": s["start"], "end": s["end"], "text": s["text"]}
        for s in res[0]["sentence_info"]
    ]
if __name__ == "__main__":
    # Smoke test: transcribe the bundled demo clip only when run as a script,
    # so importing this module does not trigger a transcription.
    res = funAsrExec("./demo.wav")
    print(res)