# speech_recognizer.py
import wave
import numpy as np
import pyaudio
import samplerate
from faster_whisper import WhisperModel
import os
import sys
import time

from config import WHISPER_MEDIUM


# ============= Locate a specific microphone by name =============
def find_input_device(name_part: str):
    """Return ``(device_index, sample_rate)`` for the first input device
    whose name contains *name_part* (case-insensitive).

    Falls back to the system default input device when no match is found.
    The temporary PyAudio handle opened for the scan is always released.
    """
    p = pyaudio.PyAudio()
    try:
        for i in range(p.get_device_count()):
            info = p.get_device_info_by_index(i)
            if name_part.lower() in info["name"].lower() and info["maxInputChannels"] > 0:
                print(f"✓ 找到麦克风: {info['name']} (index={i})")
                rate = int(info.get("defaultSampleRate", 48000))
                return i, rate
        print("⚠ 未找到指定麦克风，使用默认设备")
        index = p.get_default_input_device_info()["index"]
        info = p.get_device_info_by_index(index)
        return index, int(info.get("defaultSampleRate", 48000))
    finally:
        # Release the PortAudio handle — previously leaked on every call.
        p.terminate()


class SpeechRecognizer:
    """Real-time microphone speech recognition with faster-whisper.

    Audio is captured at the device's native sample rate, resampled to
    16 kHz (Whisper's fixed input rate), segmented with a simple
    RMS-energy VAD, and transcribed once ~0.4 s of trailing silence is
    detected.
    """

    def __init__(self, mic_name="Newmine"):
        """Load the Whisper model and open the capture stream.

        :param mic_name: substring used to pick the input device by name.
        """
        self.model_path = WHISPER_MEDIUM

        print("加载 Whisper 模型中...")
        # CPU + int8 keeps memory usage and latency low.
        self.model = WhisperModel(
            self.model_path,
            device="cpu",
            compute_type="int8"
        )
        print("✓ 模型加载完成！")

        # Whisper expects 16 kHz mono input.
        self.target_rate = 16000

        # ===== Locate the microphone =====
        self.p = pyaudio.PyAudio()
        self.device_index, self.device_rate = find_input_device(mic_name)
        print(f"🎤 麦克风采样率: {self.device_rate} Hz")

        # Open the stream at the device's own rate (e.g. 48 kHz);
        # we resample to 16 kHz in software per chunk.
        self.stream = self.p.open(
            format=pyaudio.paInt16,
            channels=1,
            rate=self.device_rate,
            input=True,
            frames_per_buffer=4000,
            input_device_index=self.device_index
        )

        # Optional raw recording of everything captured.
        self.wav_file = wave.open("recorded_audio.wav", "wb")
        self.wav_file.setnchannels(1)
        self.wav_file.setsampwidth(2)  # 16-bit samples
        self.wav_file.setframerate(self.device_rate)

        # ===== Accumulated utterance audio (16 kHz float32) =====
        self.buffer_16k = np.array([], dtype=np.float32)

        # VAD parameters.
        self.chunk_sec = 0.20  # process one 200 ms chunk at a time
        self.chunk_48k = int(self.device_rate * self.chunk_sec)  # frames per chunk at device rate

        self.silence_threshold = 0.004  # RMS below this counts as silence
        self.silence_frames = 0         # consecutive silent chunks seen
        self.speech_frames = 0          # voiced chunks in the current utterance

    def rms(self, audio):
        """Return the root-mean-square energy of *audio* (simple VAD)."""
        return np.sqrt(np.mean(audio ** 2))

    def start_listening(self):
        """Capture audio until Ctrl-C, transcribing each detected utterance."""
        print("开始实时语音识别...\n")

        try:
            while True:
                data = self.stream.read(self.chunk_48k, exception_on_overflow=False)
                self.wav_file.writeframes(data)
                # Normalize int16 PCM to [-1.0, 1.0].
                audio_48k = np.frombuffer(data, dtype=np.int16).astype(np.float32) / 32768.0

                # Resample from the device rate down to 16 kHz.
                audio_16k = samplerate.resample(
                    audio_48k,
                    self.target_rate / self.device_rate,
                    "sinc_best"
                )

                # Short-time energy decides voiced vs. silent.
                energy = self.rms(audio_16k)

                # ========== Voice activity detection ==========
                if energy > self.silence_threshold:
                    # Voiced chunk.
                    self.speech_frames += 1
                    self.silence_frames = 0
                    self.buffer_16k = np.concatenate([self.buffer_16k, audio_16k])
                else:
                    # Silent chunk. While an utterance is in progress keep
                    # appending, so brief pauses no longer punch gaps into
                    # the audio sent to Whisper (previously dropped).
                    self.silence_frames += 1
                    if len(self.buffer_16k) > 0:
                        self.buffer_16k = np.concatenate([self.buffer_16k, audio_16k])

                # 0.4 s of continuous silence → utterance over, transcribe.
                if self.silence_frames >= int(0.4 / self.chunk_sec):
                    # Gate on voiced duration (not buffer length, which now
                    # also contains trailing silence): require > 0.3 s speech.
                    if self.speech_frames * self.chunk_sec > 0.3:
                        self.transcribe_buffer()
                    self.buffer_16k = np.array([], dtype=np.float32)
                    self.silence_frames = 0
                    self.speech_frames = 0  # reset for the next utterance (was never reset)

        except KeyboardInterrupt:
            print("识别已停止")
        finally:
            self.cleanup()

    def transcribe_buffer(self):
        """Run Whisper on the buffered utterance and print any recognized text."""
        audio = self.buffer_16k.astype(np.float32)
        segments, _ = self.model.transcribe(
            audio,
            beam_size=5,
            language="en",
            without_timestamps=True  # timestamps not needed for live output
        )

        text = "".join(seg.text.strip() + " " for seg in segments).strip()
        if text:
            print("识别:", text)

    def cleanup(self):
        """Close the audio stream, terminate PortAudio, and finalize the WAV file."""
        self.stream.stop_stream()
        self.stream.close()
        self.p.terminate()
        self.wav_file.close()
        print("录音已保存为 recorded_audio.wav")


if __name__ == "__main__":
    # Entry point: bind to the "Newmine" microphone and run until Ctrl-C.
    app = SpeechRecognizer(mic_name="Newmine")
    app.start_listening()
