import os
import shutil
import tempfile

from funasr import AutoModel
from funasr.utils.postprocess_utils import rich_transcription_postprocess
from pydub import AudioSegment
from tqdm import tqdm

# Local paths to the pre-downloaded SenseVoiceSmall ASR model and the FSMN
# VAD (voice-activity-detection) model used to segment speech from silence.
model_dir = "/Users/duyi/PycharmProjects/big_model_download/iic/SenseVoiceSmall"
vad_model_dir = "/Users/duyi/PycharmProjects/big_model_download/iic/speech_fsmn_vad_zh-cn-16k-common-pytorch"

# Build the recognition pipeline on CPU.
# NOTE(review): trust_remote_code executes code shipped with the model repo —
# acceptable here only because both models are local copies; confirm provenance.
model = AutoModel(
    model=model_dir,
    trust_remote_code=True,
    vad_model=vad_model_dir,
    # Cap a single VAD segment at 30 000 ms — presumably to bound per-chunk
    # memory/latency; TODO confirm against the funasr VAD documentation.
    vad_kwargs={"max_single_segment_time": 30000},
    device="cpu",
)

# Load the interview audio (pydub delegates .aac decoding to ffmpeg).
audio_file = "/Users/duyi/PycharmProjects/interview_voice_project/__001__data/罗培鑫面试.aac"
sound = AudioSegment.from_file(audio_file)
total_duration = len(sound) / 1000  # total length in seconds (len() is in ms)

# Chunk length handed to the recognizer, in seconds.
segment_duration = 30

# Number of chunks: integer ceiling division on milliseconds. The previous
# `int(total_duration / segment_duration) + 1` over-counted by one whenever
# the audio length was an exact multiple of the chunk size (and float
# seconds could mis-round), yielding an empty trailing segment.
segment_ms = segment_duration * 1000
num_segments = -(-len(sound) // segment_ms)  # == ceil(len(sound) / segment_ms)

# Temporary directory holding the exported chunk wav files.
temp_dir = tempfile.mkdtemp()
segments = []

# Export each chunk of the audio as a standalone wav the recognizer can read.
# NOTE(review): removed a leftover debug `if i == 2: break` that silently
# truncated the transcript to the first three chunks.
for i in range(num_segments):
    start_time = i * segment_duration * 1000
    if start_time >= len(sound):
        # Guard: nothing left to export (protects against an over-counted
        # num_segments producing a zero-length trailing chunk).
        break
    end_time = min((i + 1) * segment_duration * 1000, len(sound))
    segment = sound[start_time:end_time]
    segment_file = os.path.join(temp_dir, f"segment_{i}.wav")
    segment.export(segment_file, format="wav")
    segments.append(segment_file)

# Run recognition chunk by chunk, printing each partial transcript as it
# arrives and collecting all of them for the final merge.
results = []
progress = tqdm(segments, desc="Processing segments")
for chunk_path in progress:
    recognition = model.generate(
        input=chunk_path,
        cache={},
        language="auto",
        use_itn=True,
        batch_size_s=60,
        merge_vad=True,
        merge_length_s=15,
    )
    chunk_text = rich_transcription_postprocess(recognition[0]["text"])
    print(chunk_text)
    results.append(chunk_text)

# Stitch the per-chunk transcripts together into the full transcript.
final_text = "".join(results)
print(final_text)

# Remove the temporary chunk directory. The former mid-file `import shutil`
# has been moved to the top-of-file import block per PEP 8.
shutil.rmtree(temp_dir)
