import pyaudio
import numpy as np
import wave
import os

from funasr import AutoModel
from funasr.utils.postprocess_utils import rich_transcription_postprocess

# Local SenseVoiceSmall checkpoint; change this path to relocate the model.
_MODEL_DIR = r"D:\Downloads\SenseVoiceSmall"

# Speech-recognition model with FSMN VAD segmentation, CPU inference,
# inverse text normalization enabled, and update/progress/log noise disabled.
model = AutoModel(
    model=_MODEL_DIR,
    trust_remote_code=False,
    vad_model="fsmn-vad",
    vad_kwargs={"max_single_segment_time": 30000},  # cap VAD segments at 30 s
    device="cpu",
    use_itn=True,
    disable_update=True,
    disable_pbar=True,
    disable_log=True,
)

def sound2text(audio_file):
    """Transcribe an audio file to text with the SenseVoice model.

    Args:
        audio_file: Path to the audio file to transcribe.

    Returns:
        The post-processed transcription string (rich-text markers removed).
    """
    recognition = model.generate(
        input=audio_file,
        cache={},
        language="zh",  # "zh", "en", "yue", "ja", "ko", "nospeech"
        use_itn=True,
        batch_size_s=60,
        merge_vad=True,   # merge adjacent VAD segments
        merge_length_s=15,
    )
    # generate() returns a list of result dicts; the transcript is in "text".
    return rich_transcription_postprocess(recognition[0]["text"])


if __name__ == "__main__":
    # 读取音频文件
    audio_file = r"D:\Project\Chat_Project\temp_wave\1.wav"
    
    # 如果音频文件存在，直接读取
    if os.path.exists(audio_file):
        with wave.open(audio_file, 'rb') as wf:
            audio_data = wf.readframes(wf.getnframes())
    else:
        # 否则录制一段音频
        print("请开始说话（录音5秒钟）...")
        CHUNK = 1440
        FORMAT = pyaudio.paInt16
        CHANNELS = 1
        RATE = 48000
        RECORD_SECONDS = 5
        
        p = pyaudio.PyAudio()
        stream = p.open(format=FORMAT,
                        channels=CHANNELS,
                        rate=RATE,
                        input=True,
                        frames_per_buffer=CHUNK,
                        input_device_index=1)
        
        frames = []
        for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
            data = stream.read(CHUNK)
            frames.append(data)
        
        stream.stop_stream()
        stream.close()
        p.terminate()
        # 保存录音
        with wave.open(audio_file, 'wb') as wf:
            wf.setnchannels(CHANNELS)
            wf.setsampwidth(p.get_sample_size(FORMAT))
            wf.setframerate(RATE)
            wf.writeframes(b''.join(frames))
        
        audio_data = b''.join(frames)
        print(f"录音已保存为 {audio_file}")
    
    # 利用语音识别模型将音频数据转换为文本
    text = sound2text(audio_file)

    # 删除音频文件
    # os.remove(audio_file)
    
    # 输出文本
    print("识别结果:")
    print(text)