import json
import wave

import pyaudio
import numpy as np
import samplerate
from vosk import Model, KaldiRecognizer
from config import *


class SpeechRecognizer:
    """Stream microphone audio into a Vosk recognizer.

    The capture device runs at 48 kHz while Vosk expects 16 kHz, so every
    buffer is resampled before recognition. The raw (unresampled) audio is
    also archived to a WAV file for later inspection.
    """

    def __init__(self, input_device_index=12, input_rate=48000,
                 wav_path="recorded_audio.wav"):
        """
        :param input_device_index: PyAudio index of the capture device
                                   (default 12, the original hard-coded value).
        :param input_rate: sample rate the device actually supports, in Hz.
        :param wav_path: file where the raw captured audio is archived.
        """
        # Load the offline recognition model (path comes from config).
        self.model = Model(VOSK_small_en)

        # Set up PyAudio to capture microphone input.
        self.p = pyaudio.PyAudio()
        self.input_rate = input_rate  # rate the device actually supports
        self.vosk_rate = 16000        # rate required by Vosk
        # Open the microphone stream.
        self.stream = self.p.open(
            format=pyaudio.paInt16,
            channels=1,  # mono
            rate=self.input_rate,
            input=True,
            frames_per_buffer=4000,  # frames per read
            input_device_index=input_device_index,
        )

        # Create the recognizer at the rate Vosk expects (not the device rate).
        self.rec = KaldiRecognizer(self.model, self.vosk_rate)
        self.result_callback = None
        # Open a WAV file to archive the raw audio at the device rate.
        self.wav_file = wave.open(wav_path, "wb")
        self.wav_file.setnchannels(1)
        self.wav_file.setsampwidth(2)  # int16 = 2 bytes
        self.wav_file.setframerate(self.input_rate)

    def set_result_callback(self, callback):
        """Register a callback invoked with each final recognized text.

        :param callback: callable taking one argument (the recognized text).
        """
        self.result_callback = callback

    def start_listening(self):
        """Capture, resample, and recognize audio until interrupted (Ctrl-C).

        Prints each final result and forwards it to the registered callback.
        Resources are released via :meth:`cleanup` on exit.
        """
        print("开始语音识别，请开始讲话...")

        try:
            while True:
                # Read one buffer from the microphone; don't raise on overflow.
                data = self.stream.read(4000, exception_on_overflow=False)

                # Archive the raw audio before any processing.
                self.wav_file.writeframes(data)

                # Interpret the raw bytes as int16 samples.
                audio_int16 = np.frombuffer(data, dtype=np.int16)

                # Resample 48000 Hz -> 16000 Hz (ratio = 1/3).
                audio_resampled = samplerate.resample(
                    audio_int16,
                    self.vosk_rate / self.input_rate,
                    converter_type='sinc_best',  # high-quality resampler
                )

                # Back to int16 bytes — Vosk requires 16-bit PCM.
                resampled_bytes = audio_resampled.astype(np.int16).tobytes()

                # BUG FIX: feed the *resampled* 16 kHz audio to the
                # recognizer. The original passed the raw 48 kHz buffer,
                # leaving the resampling unused and feeding the recognizer
                # audio at 3x the rate it was configured for.
                if self.rec.AcceptWaveform(resampled_bytes):
                    # Result() returns a JSON string; extract the text field.
                    result_json = json.loads(self.rec.Result())
                    text = result_json.get('text', '')
                    if text.strip():
                        print(f"识别结果: {text}")
                        # Forward to the callback when one is registered.
                        if self.result_callback:
                            self.result_callback(text)

        except KeyboardInterrupt:
            print("\n停止语音识别")
        finally:
            self.cleanup()

    def get_last_result(self):
        """Return the most recent *partial* (in-progress) recognition text."""
        result_json = json.loads(self.rec.PartialResult())
        return result_json.get('partial', '')

    def cleanup(self):
        """Release audio resources and close the WAV archive. Idempotent."""
        if hasattr(self, 'stream') and self.stream:
            self.stream.stop_stream()
            self.stream.close()
            self.stream = None  # guard against a second cleanup() call
        if hasattr(self, 'p') and self.p:
            self.p.terminate()
            self.p = None
        if hasattr(self, 'wav_file') and self.wav_file:
            self.wav_file.close()
            self.wav_file = None
            print("录音已保存为 recorded_audio.wav")


if __name__ == "__main__":
    # Standalone run: capture from the microphone until interrupted.
    speech = SpeechRecognizer()
    speech.start_listening()
