import queue
import time
import numpy as np
import torch
from common.config import conf
from common.log import logger
from vad.vad import VAD

# Audio sampling rate in Hz assumed throughout this module (used as `sr` for the model).
SAMPLE_RATE = 16000
# Samples per audio chunk — NOTE(review): not referenced anywhere in this file;
# presumably producers feeding vad_queue chunk audio to this size. Verify callers.
CHUNK_SIZE = 512


class SileroVAD(VAD):
    """Voice activity detection backed by the Silero VAD torch model.

    Consumes raw 16-bit PCM mono chunks from ``vad_queue``, scores each chunk
    with the Silero model, smooths confidences over a short history, and fires
    callbacks when speech starts (``recording_callback``) and when trailing
    silence ends an utterance (``mute_callback`` with the captured audio).
    """

    def __init__(self, vad_queue: queue.Queue, recording_callback, mute_callback):
        """Load the Silero model and store the callbacks.

        Args:
            vad_queue: queue of raw audio chunks (bytes, int16 PCM mono,
                assumed SAMPLE_RATE Hz — TODO confirm against producer).
            recording_callback: called with no arguments when speech is first
                detected.
            mute_callback: called with the accumulated audio bytes once the
                speaker has been silent longer than the silence threshold.
        """
        super().__init__(vad_queue)
        model_path = conf().get("silero_model")
        device = conf().get("device", "cpu")
        # NOTE(review): force_reload=True with source='local' re-imports the
        # local hubconf on every construction; confirm this is intended.
        self.model, _ = torch.hub.load(
            repo_or_dir=model_path,
            model='silero_vad',
            source='local',
            force_reload=True
        )
        self.model.to(device)
        self.device = device

        # Callbacks
        self.recording_callback = recording_callback
        self.mute_callback = mute_callback

        # Tuning parameters
        self.confidence_threshold = 0.5     # smoothed confidence above this counts as speech
        self.history_size = 5               # number of confidences averaged for smoothing
        self.dynamic_silence_threshold = 1  # seconds of silence that ends an utterance
        self.buffer_size = 20               # pre-roll: chunks kept from before speech began

    def start_vad(self):
        """Start the detection thread.

        Raises:
            RuntimeError: if the detector is already running.
        """
        if self.running:
            raise RuntimeError("VAD already running")
        self.running = True
        self.thread.start()

    def stop_vad(self):
        """Stop the detection loop and wait briefly for the worker to exit.

        Must be called from a thread other than the worker itself, since it
        joins ``self.thread``.
        """
        if not self.running:
            return
        self.running = False
        if self.thread.is_alive():
            self.thread.join(timeout=1)

    def _detect_voice(self):
        """Worker loop: consume audio chunks and drive the speech state machine."""
        audio_buffer = []        # rolling pre-roll of recent raw chunks
        confidence_history = []  # recent confidences, averaged for smoothing
        last_voice_time = 0
        is_recording = False
        audio_data = b""
        while self.running:
            # 1. Fetch the next audio chunk; a blocking get with a timeout
            # replaces the old get_nowait() + sleep(0.1) polling loop.
            try:
                audio_chunk = self.vad_queue.get(timeout=0.1)
            except queue.Empty:
                continue
            try:
                # 2. Maintain the bounded pre-roll buffer.
                audio_buffer.append(audio_chunk)
                if len(audio_buffer) > self.buffer_size:
                    audio_buffer.pop(0)

                # 3. Score the chunk: int16 PCM -> float32 in [-1, 1) -> confidence.
                audio_np = np.frombuffer(audio_chunk, dtype=np.int16)
                audio_float = audio_np.astype(np.float32) / 32768.0
                input_tensor = torch.from_numpy(audio_float).to(self.device)
                with torch.no_grad():  # inference only; skip autograd bookkeeping
                    confidence = self.model(input_tensor, sr=SAMPLE_RATE).item()

                # 4. Smooth the confidence over a short history window.
                confidence_history.append(confidence)
                if len(confidence_history) > self.history_size:
                    confidence_history.pop(0)
                avg_confidence = sum(confidence_history) / len(confidence_history)

                # 5. Live status display.
                print(f"\rVAD置信度: {avg_confidence:.4f}", end="")

                # 6. Speech onset detection.
                current_time = time.time()
                if avg_confidence > self.confidence_threshold:
                    last_voice_time = current_time

                    if not is_recording:
                        # Start recording. Seed with the pre-roll EXCLUDING the
                        # current chunk — step 7 below appends it, and the old
                        # code duplicated the first chunk by including it twice.
                        print("\n检测到语音，开始录音...")
                        is_recording = True
                        audio_data = b"".join(audio_buffer[:-1])
                        self.recording_callback()

                # 7. While recording: accumulate audio and watch for silence.
                if is_recording:
                    audio_data += audio_chunk

                    silence_duration = current_time - last_voice_time
                    if silence_duration > self.dynamic_silence_threshold:
                        print(f"\n持续静音 {silence_duration:.1f}s，停止录音")
                        self.mute_callback(audio_data)
                        # Reset state for the next utterance.
                        is_recording = False
                        audio_data = b""
                        audio_buffer.clear()
                        confidence_history.clear()
                        last_voice_time = 0

            except Exception as e:
                logger.error(f"VAD异常: {str(e)}")
                # Stop the loop directly: calling self.stop_vad() here would
                # join self.thread from inside that same thread and raise
                # RuntimeError ("cannot join current thread").
                self.running = False
