import json
import wave

import pyaudio
import numpy as np
import samplerate
from vosk import Model, KaldiRecognizer
from config import *
import os, sys
# Silence C-level logging (Vosk + ALSA + Jack) by discarding stderr.
# NOTE(review): this also hides Python tracebacks written to stderr — confirm
# that is intended. The devnull handle is deliberately kept open for the
# lifetime of the process.
sys.stderr = open(os.devnull, 'w')

def find_device_index_by_name(target_name: str):
    """Return the index of the first input device whose name contains
    *target_name* (case-insensitive), or None if no match is found.

    A warning is printed on failure; PyAudio treats an index of None as
    "use the default input device", so callers can pass the result through
    unchanged.
    """
    p = pyaudio.PyAudio()
    try:
        for i in range(p.get_device_count()):
            info = p.get_device_info_by_index(i)
            name = info.get("name", "").lower()
            # Only consider devices that can actually capture audio.
            if target_name.lower() in name and info.get("maxInputChannels", 0) > 0:
                return i
    finally:
        # Always release PortAudio, even if a device query raises.
        p.terminate()
    print(f"⚠️ 未找到包含 '{target_name}' 的麦克风，使用默认输入设备。")
    return None

class SpeechRecognizer:
    """Capture microphone audio at 48 kHz, resample it to the 16 kHz Vosk
    expects, run streaming recognition, and archive the raw capture to
    ``recorded_audio.wav``.
    """

    def __init__(self):
        # Load the Vosk acoustic model (path supplied by config via VOSK_en).
        self.model = Model(VOSK_en)

        # PyAudio instance that owns the microphone stream.
        self.p = pyaudio.PyAudio()
        # Configuration
        self.input_device_index = find_device_index_by_name("Newmine")  # USB microphone
        self.input_rate = 48000       # sample rate the device actually supports
        self.vosk_rate = 16000        # sample rate Vosk requires
        # Open the microphone stream.
        self.stream = self.p.open(
            format=pyaudio.paInt16,
            channels=1,                 # mono
            rate=self.input_rate,
            input=True,
            frames_per_buffer=4000,
            input_device_index=self.input_device_index,
        )

        # The recognizer must be created with the post-resample rate; use the
        # named constant instead of duplicating the magic number 16000.
        self.rec = KaldiRecognizer(self.model, self.vosk_rate)

        # WAV file that archives the raw (pre-resample) capture.
        self.wav_file = wave.open("recorded_audio.wav", "wb")
        self.wav_file.setnchannels(1)
        self.wav_file.setsampwidth(2)                # int16 = 2 bytes
        self.wav_file.setframerate(self.input_rate)  # store the original 48 kHz

    def start_listening(self):
        """Read the microphone until Ctrl+C, printing each recognized phrase."""
        print("开始语音识别，请开始讲话...")

        try:
            pending = b""
            # Real-time processing loop.
            while True:
                data = self.stream.read(4000, exception_on_overflow=False)
                # Archive the raw audio before any processing.
                self.wav_file.writeframes(data)
                pending += data
                # Process roughly every 0.2 s of audio to reduce fragmentation.
                if len(pending) >= 6400:
                    audio_int16 = np.frombuffer(pending, dtype=np.int16)

                    # Resample 48000 -> 16000 (ratio = 16000/48000 = 1/3).
                    audio_resampled = samplerate.resample(
                        audio_int16,
                        self.vosk_rate / self.input_rate,
                        converter_type='sinc_best',  # highest-quality converter
                    )

                    # The sinc resampler can overshoot slightly past the int16
                    # range; clip before casting to avoid integer wraparound.
                    resampled_bytes = (
                        np.clip(audio_resampled, -32768, 32767)
                        .astype(np.int16)
                        .tobytes()
                    )
                    pending = b""
                    # Feed the recognizer; True means a full utterance ended.
                    if self.rec.AcceptWaveform(resampled_bytes):
                        result = json.loads(self.rec.Result())
                        text = result.get('text', '')
                        if text.strip():
                            print(f"识别结果: {text}")

        except KeyboardInterrupt:
            # Flush audio still buffered inside the recognizer so the last
            # utterance is not silently dropped on Ctrl+C.
            final_text = json.loads(self.rec.FinalResult()).get('text', '')
            if final_text.strip():
                print(f"识别结果: {final_text}")
            print("\n停止语音识别")
        finally:
            self.cleanup()

    def cleanup(self):
        """Release the audio stream, PortAudio, and the WAV file."""
        if hasattr(self, 'stream') and self.stream:
            self.stream.stop_stream()
            self.stream.close()
        if hasattr(self, 'p'):
            self.p.terminate()
        if hasattr(self, 'wav_file') and self.wav_file:
            self.wav_file.close()
            print("录音已保存为 recorded_audio.wav")

if __name__ == "__main__":
    # Script entry point: build the recognizer and block on the mic loop.
    SpeechRecognizer().start_listening()
