import nemo.collections.asr as nemo_asr

# Cache of loaded ASR models keyed by model name. Loading the model is the
# expensive step (checkpoint download + initialization), so it must not be
# repeated on every transcribe_audio() call.
_ASR_MODEL_CACHE: dict = {}

_DEFAULT_MODEL_NAME = "nvidia/parakeet-tdt-0.6b-v2"


def _get_asr_model(model_name: str):
    """Return the ASR model for *model_name*, loading and caching it on first use."""
    if model_name not in _ASR_MODEL_CACHE:
        _ASR_MODEL_CACHE[model_name] = nemo_asr.models.ASRModel.from_pretrained(
            model_name=model_name
        )
    return _ASR_MODEL_CACHE[model_name]


def transcribe_audio(wav_path: str, timestamps: bool = False,
                     model_name: str = _DEFAULT_MODEL_NAME):
    """Transcribe a single WAV file with a NeMo ASR model.

    Args:
        wav_path: Path to an existing WAV audio file.
        timestamps: If True, also request timestamp information from the model.
        model_name: Pretrained NeMo model identifier; defaults to the same
            model the original hard-coded value used, so existing callers
            are unaffected.

    Returns:
        A ``(text, timestamp)`` tuple. ``timestamp`` is the model's timestamp
        structure when *timestamps* is True, otherwise ``None``.

    Raises:
        RuntimeError: If the model returns no output for the given file.
    """
    model = _get_asr_model(model_name)
    outputs = model.transcribe([wav_path], timestamps=timestamps)
    # Guard against an empty result list instead of raising a bare IndexError.
    if not outputs:
        raise RuntimeError(f"ASR model returned no output for {wav_path!r}")
    res = outputs[0]
    return res.text, (res.timestamp if timestamps else None)

if __name__ == "__main__":
    wav = "test_audio.wav"
    # Assumes the WAV file already exists, or that the audio/video has been
    # converted to WAV beforehand.
    text, ts = transcribe_audio(wav, timestamps=True)
    print("转写内容：", text)
    if ts:
        print("时间戳 segments：")
        segments = ts.get("segment", [])
        for seg in segments:
            start, end, content = seg["start"], seg["end"], seg["segment"]
            print(f"{start} → {end}: {content}")
