import os
from funasr import AutoModel
from funasr.utils.postprocess_utils import rich_transcription_postprocess

# Initialize the FunASR pipeline: ASR + VAD + punctuation + speaker models.
model = AutoModel(
    model="paraformer-zh",     # Mandarin ASR model
    vad_model="fsmn-vad",      # voice activity detection (audio segmentation)
    punc_model="ct-punc",      # punctuation restoration
    spk_model="cam++",         # speaker embedding model; enables "sentence_info"/"spk" in results
    disable_update=True,       # skip FunASR's online model-update check at startup
    device="cuda:0",           # NOTE(review): hard-codes the first GPU — fails without CUDA; confirm target machine
)

# Resolve the directory containing this script; input/output folders live beside it.
current_dir = os.path.dirname(os.path.abspath(__file__))

# Audio files are read from ./audio; transcripts are written to ./transcribe_output.
input_folder = os.path.join(current_dir, "audio")
output_folder = os.path.join(current_dir, "transcribe_output")

# Recognized audio container extensions (matched case-insensitively below).
audio_extensions = [
    ".wav",
    ".mp3",
    ".m4a",
    ".flac",
]

# Ensure the output folder exists before any transcript is written.
os.makedirs(output_folder, exist_ok=True)

# Transcribe every supported audio file in the input folder.
# sorted(): os.listdir order is filesystem-dependent; sort for deterministic runs.
for filename in sorted(os.listdir(input_folder)):
    # Guard clause: skip anything that is not a supported audio file.
    if not filename.lower().endswith(tuple(audio_extensions)):
        continue

    audio_path = os.path.join(input_folder, filename)
    print(f"正在处理：{audio_path}")
    res = model.generate(
        input=audio_path,
        cache={},
        # NOTE(review): FunASR's own demos use "zn" (not ISO "zh") as the Chinese
        # language token — confirm against the installed FunASR version before changing.
        language="zn",
        use_itn=True,       # inverse text normalization (numerals, punctuation forms)
        batch_size_s=60,
        merge_vad=True,
        merge_length_s=15,
    )

    # Output <name>.txt next to the other transcripts, one file per audio file.
    output_file = os.path.join(output_folder, f"{os.path.splitext(filename)[0]}.txt")

    with open(output_file, "w", encoding="utf-8") as file:
        # Group consecutive sentences by speaker: one output line per speaker turn.
        previous_spk = None
        current_line = ""
        # .get() guards against results lacking diarization info (e.g. when the
        # speaker model produced no segments) instead of raising KeyError.
        for sentence in res[0].get("sentence_info", []):
            spk = sentence["spk"]
            text = rich_transcription_postprocess(sentence["text"])

            if spk == previous_spk:
                # Same speaker as the previous sentence: extend the current turn.
                current_line += text
            else:
                # Speaker changed: flush the finished turn, start a new one.
                if previous_spk is not None:
                    file.write(f"{current_line}\n")
                current_line = f"讲话人-{spk}:  {text}"
            previous_spk = spk

        # Flush the final turn (the loop above only writes on speaker changes).
        if current_line:
            file.write(f"{current_line}\n")

    print(f"已输出到：{output_file}")
