# speech_utils.py
import time
import re
import numpy as np
import webrtcvad
from pypinyin import pinyin, Style

# Configuration parameters
from asr_config import AUDIO_RATE, CHUNK_SIZE, CHUNK_DURATION_MS, KEYWORD_PINYIN, VAD_MODE

# Model-related libraries (make sure the packages are installed and the model file paths are correct)
from funasr import AutoModel
from funasr.utils.postprocess_utils import rich_transcription_postprocess
from modelscope.pipelines import pipeline

# Shared webrtcvad instance (module-level, reused by all detectors);
# aggressiveness is taken from VAD_MODE (webrtcvad accepts modes 0-3).
vad = webrtcvad.Vad(VAD_MODE)


def extract_chinese_and_convert_to_pinyin(input_string):
    """Extract the Chinese characters from *input_string* and return their
    pinyin as a space-separated string (tone-less NORMAL style).

    Used downstream for wake-keyword matching.
    """
    # Keep only CJK Unified Ideographs (same range as the original regex).
    hanzi_only = ''.join(ch for ch in input_string if '\u4e00' <= ch <= '\u9fa5')
    syllables = pinyin(hanzi_only, style=Style.NORMAL)
    return ' '.join(entry[0] for entry in syllables)


def calibrate_background(stream, calibration_seconds, chunk_duration_ms, chunk_size):
    """Estimate an amplitude threshold for the background noise floor.

    Reads `calibration_seconds` worth of audio from *stream* (one chunk per
    `chunk_duration_ms`), takes the mean absolute int16 amplitude of each
    chunk, and returns mean + 2 * std over those per-chunk averages.
    """
    print("开始校准背景噪音，请保持安静...")
    frames_to_read = int(calibration_seconds * (1000 / chunk_duration_ms))
    chunk_means = []
    for _ in range(frames_to_read):
        raw = stream.read(chunk_size, exception_on_overflow=False)
        samples = np.frombuffer(raw, dtype=np.int16)
        chunk_means.append(np.abs(samples).mean())
    mean_noise = np.mean(chunk_means)
    std_noise = np.std(chunk_means)
    amplitude_threshold = mean_noise + 2 * std_noise
    print(f"校准完成：噪音均值={mean_noise:.2f}，标准差={std_noise:.2f}，阈值={amplitude_threshold:.2f}")
    return amplitude_threshold


def analyze_spectrum(audio_chunk):
    """Heuristic voice-activity check based on spectral peak counting.

    Applies a Hann window to the int16 samples in *audio_chunk* (reduces
    spectral leakage), computes the rFFT magnitude spectrum, and counts local
    maxima that exceed 1.5x the spectral mean. Returns True when at least 3
    such peaks exist; returns False for empty input.
    """
    samples = np.frombuffer(audio_chunk, dtype=np.int16)
    if samples.size == 0:
        return False

    windowed = samples * np.hanning(samples.size)
    spectrum = np.abs(np.fft.rfft(windowed))
    if spectrum.size < 3:
        # Too few bins for an interior local maximum to exist
        # (the original loop over range(1, len-1) was empty here too).
        return False
    threshold = 1.5 * spectrum.mean()
    interior = spectrum[1:-1]
    # Vectorized local-maximum test: strictly greater than both neighbors and
    # above the relative threshold (replaces the original Python-level loop).
    peaks = (interior > spectrum[:-2]) & (interior > spectrum[2:]) & (interior > threshold)
    return int(peaks.sum()) >= 3


class BaseSpeechDetector:
    """
    Base class for speech detection.

    - Classifies each audio chunk as speech/silence via an energy gate,
      webrtcvad, and a spectral-peak heuristic.
    - Smooths the per-chunk decision with consecutive-frame counters.
    - Buffers chunks while speech is active and calls handle_complete_speech()
      once enough trailing silence marks the end of the utterance.

    Subclasses must override handle_complete_speech().
    """

    def __init__(self, amplitude_threshold, required_speech_frames=2, required_silence_frames=15):
        # Energy gate: chunks below this mean |amplitude| are treated as silence.
        self.amplitude_threshold = amplitude_threshold
        # Consecutive speech frames needed before an utterance is considered started.
        self.required_speech_frames = required_speech_frames
        # Consecutive silence frames needed before an utterance is considered finished.
        self.required_silence_frames = required_silence_frames
        self.speech_buffer = bytearray()
        self.speech_state = False
        self.consecutive_speech = 0
        self.consecutive_silence = 0

    def is_speech(self, audio_chunk):
        """Return True when *audio_chunk* appears to contain speech.

        The chunk must pass three gates in order:
          1. mean absolute amplitude above the calibrated threshold;
          2. webrtcvad;
          3. the spectral-peak heuristic (analyze_spectrum).
        """
        audio_data = np.frombuffer(audio_chunk, dtype=np.int16)
        if audio_data.size == 0:
            # Guard: mean() of an empty array is NaN (with a RuntimeWarning),
            # and webrtcvad cannot process an empty buffer.
            return False
        amplitude = np.abs(audio_data).mean()
        if amplitude < self.amplitude_threshold:
            return False
        # NOTE(review): webrtcvad only accepts 10/20/30 ms frames — assumes
        # CHUNK_SIZE/AUDIO_RATE from asr_config satisfy that; confirm.
        vad_result = vad.is_speech(audio_chunk, AUDIO_RATE)
        spectral_result = analyze_spectrum(audio_chunk)
        return vad_result and spectral_result

    def process_chunk(self, audio_chunk):
        """Feed one audio chunk through the detector state machine.

        Speech chunks extend the utterance buffer once the start threshold is
        met; after enough consecutive silence the buffered utterance is passed
        to handle_complete_speech() and the state is reset.
        """
        if self.is_speech(audio_chunk):
            self.consecutive_speech += 1
            self.consecutive_silence = 0
            if not self.speech_state and self.consecutive_speech >= self.required_speech_frames:
                self.speech_state = True
                print("检测到语音开始")
            if self.speech_state:
                self.speech_buffer.extend(audio_chunk)
        else:
            self.consecutive_silence += 1
            self.consecutive_speech = 0
            if self.speech_state and self.consecutive_silence >= self.required_silence_frames:
                # Append the final silence chunk so the utterance tail is kept.
                self.speech_buffer.extend(audio_chunk)
                self.handle_complete_speech(bytes(self.speech_buffer))
                # Reset state for the next utterance.
                self.speech_state = False
                self.speech_buffer = bytearray()

    def handle_complete_speech(self, audio_data):
        """Hook invoked with the raw bytes of a finished utterance.

        The base class previously had no definition, which made a misuse fail
        with AttributeError deep inside process_chunk; subclasses must override.
        """
        raise NotImplementedError("subclasses must implement handle_complete_speech()")

class ASRSpeechDetector(BaseSpeechDetector):
    """ASR-only speech detector: every finished utterance is transcribed to
    text in real time and printed."""

    def __init__(self, amplitude_threshold, asr_model, **kwargs):
        super().__init__(amplitude_threshold, **kwargs)
        self.asr_model = asr_model

    def handle_complete_speech(self, audio_data):
        """Transcribe the buffered utterance and print the recognized text."""
        generate_kwargs = dict(
            cache={},
            language="zh",
            use_itn=True,
            batch_size_s=60,
            merge_vad=True,
            merge_length_s=15,
        )
        result = self.asr_model.generate(input=audio_data, **generate_kwargs)
        text = rich_transcription_postprocess(result[0]["text"])
        print("ASR识别结果:", text)


class PersonVoiceDetector(BaseSpeechDetector):
    """
    Speech detector with keyword spotting plus speaker verification:
      1. Transcribe the finished utterance via ASR and convert it to pinyin.
      2. If the wake keyword (in pinyin form) is present, verify the speaker
         against a pre-recorded reference ("reference_recording.wav").
      3. On successful verification, set conversation_triggered so the main
         program can switch into conversation mode.
    """

    def __init__(self, amplitude_threshold, asr_model, speaker_verifier, **kwargs):
        super().__init__(amplitude_threshold, **kwargs)
        self.asr_model = asr_model
        self.speaker_verifier = speaker_verifier
        self.keyword_pinyin = KEYWORD_PINYIN
        self.conversation_triggered = False

    def handle_complete_speech(self, audio_data):
        """Run ASR -> keyword match -> speaker verification on the utterance."""
        res = self.asr_model.generate(
            input=audio_data,
            cache={},
            language="zh",
            use_itn=True,
            batch_size_s=60,
            merge_vad=True,
            merge_length_s=15,
        )
        recognized_text = rich_transcription_postprocess(res[0]["text"])
        print("ASR识别结果:", recognized_text)
        recognized_pinyin = extract_chinese_and_convert_to_pinyin(recognized_text)
        print("拼音结果:", recognized_pinyin)

        # Guard clause: no keyword, nothing more to do.
        if self.keyword_pinyin not in recognized_pinyin:
            print("关键词匹配失败")
            return

        print("关键词检测成功，开始声纹验证...")
        # "reference_recording.wav" is assumed to be the enrolled reference file.
        samples = np.frombuffer(audio_data, dtype=np.int16)
        verify_result = self.speaker_verifier(["reference_recording.wav", samples])
        print("声纹验证结果:", verify_result)
        # NOTE(review): this threshold comes from `config`, while the rest of
        # the module reads settings from `asr_config` (L9) — confirm intended.
        from config import SPEAKER_VERIFICATION_THRESHOLD
        if verify_result["score"] > SPEAKER_VERIFICATION_THRESHOLD:
            print("声纹验证成功，触发对话模式")
            self.conversation_triggered = True
        else:
            print("声纹验证失败，请重新说话")
