import pyaudio
import webrtcvad
import numpy as np

# Configuration parameters
AUDIO_RATE = 16000       # Sample rate: 16000 (webrtcvad supports 8000, 16000, 32000 or 48000)
CHUNK_SIZE = 480         # Samples per chunk (30 ms at 16 kHz; webrtcvad requires 10/20/30 ms frames)
VAD_MODE = 1             # VAD aggressiveness mode 0-3; higher modes filter non-speech more aggressively

# Module-level WebRTC VAD instance, shared by SpeechDetector.process_chunk
vad = webrtcvad.Vad(VAD_MODE)

class SpeechDetector:
    """Detect speech on/off transitions in a stream of raw 16-bit mono PCM chunks.

    Combines a cheap amplitude gate (calibrated against background noise) with
    WebRTC VAD, and debounces state changes by requiring several consecutive
    speech/silence frames before switching, so short noise bursts do not flip
    the detector.
    """

    def __init__(self, calibration_seconds=2, chunk_duration_ms=30, noise_sigma=2.0):
        """
        Args:
            calibration_seconds: length of the background-noise calibration
                phase, in seconds.
            chunk_duration_ms: duration of each audio chunk in milliseconds
                (default 30 ms, matching CHUNK_SIZE at 16 kHz).
            noise_sigma: number of standard deviations above the calibrated
                noise mean used as the amplitude threshold (default 2.0,
                matching the previous hard-coded behavior).
        """
        self.calibration_seconds = calibration_seconds
        self.chunk_duration_ms = chunk_duration_ms
        self.noise_sigma = noise_sigma
        self.calibrated = False
        self.amplitude_threshold = None

        # Debounce parameters: require several consecutive frames of the same
        # kind before switching state.
        self.speech_state = False         # current state: True while speech is active
        self.consecutive_speech = 0       # run length of consecutive speech frames
        self.consecutive_silence = 0      # run length of consecutive silence frames
        self.required_speech_frames = 3   # 3 frames (~90 ms) to enter the speech state
        self.required_silence_frames = 5  # 5 frames (~150 ms) to leave the speech state

    def calibrate(self, stream):
        """Measure background noise and derive an adaptive amplitude threshold.

        Reads `calibration_seconds` worth of audio from `stream` (the caller
        should keep the environment quiet) and sets `amplitude_threshold` to
        mean + noise_sigma * std of the per-chunk mean absolute amplitudes.

        Args:
            stream: an open PyAudio input stream (or any object exposing
                `read(num_frames, exception_on_overflow=...)`).
        """
        print("开始校准背景噪音，请保持安静...")
        # Clamp to at least one frame so a zero/short calibration window
        # cannot produce np.mean([]) == NaN.
        num_frames = max(1, int(self.calibration_seconds * (1000 / self.chunk_duration_ms)))
        amplitudes = []
        for _ in range(num_frames):
            audio_chunk = stream.read(CHUNK_SIZE, exception_on_overflow=False)
            audio_data = np.frombuffer(audio_chunk, dtype=np.int16)
            amplitudes.append(np.abs(audio_data).mean())
        mean_noise = np.mean(amplitudes)
        std_noise = np.std(amplitudes)
        # Threshold sits noise_sigma standard deviations above the noise floor.
        self.amplitude_threshold = mean_noise + self.noise_sigma * std_noise
        print(f"校准完成：噪音均值={mean_noise:.2f}，标准差={std_noise:.2f}，设置阈值={self.amplitude_threshold:.2f}")
        self.calibrated = True

    def process_chunk(self, audio_chunk):
        """Run one chunk through the amplitude gate and VAD, updating state.

        Args:
            audio_chunk: raw little-endian int16 mono PCM bytes (one 30 ms
                frame at AUDIO_RATE, as produced by the capture loop).

        Returns:
            bool: the current (possibly just-updated) speech state.
        """
        # Fall back to a conservative default when calibrate() was never run.
        amplitude_threshold = self.amplitude_threshold if self.amplitude_threshold is not None else 500

        audio_data = np.frombuffer(audio_chunk, dtype=np.int16)

        # Empty chunks (e.g. a stream underrun) count as silence; chunks below
        # the amplitude gate skip the comparatively expensive VAD call.
        if audio_data.size == 0 or np.abs(audio_data).mean() < amplitude_threshold:
            is_speech = False
        else:
            is_speech = vad.is_speech(audio_chunk, AUDIO_RATE)

        # Debounce bookkeeping: count consecutive frames of each kind.
        if is_speech:
            self.consecutive_speech += 1
            self.consecutive_silence = 0
        else:
            self.consecutive_silence += 1
            self.consecutive_speech = 0

        # Transition only after enough consecutive evidence in either direction.
        if not self.speech_state and self.consecutive_speech >= self.required_speech_frames:
            self.speech_state = True
            print("Detected Speech")
        elif self.speech_state and self.consecutive_silence >= self.required_silence_frames:
            self.speech_state = False
            print("No speech")

        return self.speech_state

def main():
    """Entry point: open a microphone stream, calibrate against background
    noise, then monitor continuously until interrupted with Ctrl+C."""
    audio_interface = pyaudio.PyAudio()
    mic_stream = audio_interface.open(format=pyaudio.paInt16,
                                      channels=1,
                                      rate=AUDIO_RATE,
                                      input=True,
                                      frames_per_buffer=CHUNK_SIZE)

    detector = SpeechDetector()

    # Calibrate the noise floor first so the amplitude gate adapts to the room.
    detector.calibrate(mic_stream)

    print("开始监听，说话吧...(按Ctrl+C停止)")

    try:
        while True:
            detector.process_chunk(
                mic_stream.read(CHUNK_SIZE, exception_on_overflow=False))
    except KeyboardInterrupt:
        print("停止监听")
    finally:
        # Always release the audio device, even on interrupt.
        mic_stream.stop_stream()
        mic_stream.close()
        audio_interface.terminate()


if __name__ == "__main__":
    main()
