import re
import time
import numpy as np
import pyaudio
import webrtcvad
import threading
from pypinyin import pinyin, Style
from funasr import AutoModel
from funasr.utils.postprocess_utils import rich_transcription_postprocess
from modelscope.pipelines import pipeline
import pyttsx3
from deepseek_api import get_deepseek_response  # 流式输出
import queue
import threading

# =================== Constant configuration ===================
AUDIO_RATE = 16000       # sample rate in Hz (webrtcvad supports 8000, 16000, 32000 or 48000)
CHUNK_SIZE = 480         # samples per chunk: 480 @ 16 kHz = 30 ms (VAD requires 10/20/30 ms frames)
VAD_MODE = 1             # webrtcvad aggressiveness (0-3; lower = more conservative)

# Speech-detection threshold parameters
DEFAULT_AMPLITUDE_THRESHOLD = 11540.82   # fallback mean-amplitude gate when no calibration is run
REQUIRED_SPEECH_FRAMES = 2       # consecutive speech frames to open an utterance (~60 ms)
REQUIRED_SILENCE_FRAMES = 15     # consecutive silence frames to close an utterance (~450 ms)
LONG_SILENCE_FRAMES = 67         # consecutive silence frames treated as "no speech" (67 x 30 ms ~ 2 s)

# =================== Global model initialization ===================
vad = webrtcvad.Vad(VAD_MODE)

# SenseVoice ASR model (FunASR) with its own FSMN VAD for segmenting long audio
sound_recognition_model = AutoModel(
    model=r"D:\Downloads\SenseVoiceSmall",
    trust_remote_code=True,
    remote_code="./model.py",
    vad_model="fsmn-vad",
    vad_kwargs={"max_single_segment_time": 30000},
    device="cuda:0",
    use_itn=True,
    disable_update=True,
    disable_pbar=True,
    disable_log=True
)

# CAM++ speaker-verification pipeline (ModelScope) for voiceprint checks
sound_verifier_model = pipeline(
    task='speaker-verification',
    model=r'D:\Downloads\speech_campplus_sv_zh-cn_3dspeaker_16k'
)

# =================== Utility functions ===================

# Speak the DeepSeek model's streamed text output via speech synthesis


# TTS queue and worker-thread control state
speech_queue = queue.Queue()   # pending text fragments awaiting synthesis
tts_thread = None              # set in main(); holds the tts_worker thread
stop_flag = False              # set True in main()'s finally block to stop tts_worker

def tts_worker():
    """Dedicated TTS worker: pull text off speech_queue and speak it via pyttsx3."""
    engine = pyttsx3.init()
    engine.setProperty('rate', 160)

    while True:
        if stop_flag:
            break
        try:
            utterance = speech_queue.get(timeout=0.5)
            engine.say(utterance)
            engine.runAndWait()
        except queue.Empty:
            # Timed out waiting for work; loop back and re-check stop_flag.
            continue
        except Exception as e:
            print(f"语音合成出错: {e}")

    engine.stop()
    print("语音合成线程已安全退出")

# Non-blocking replacement for direct TTS calls
def talkContent(content):
    """Queue *content* for the TTS worker thread; returns immediately."""
    if not content.strip():
        return
    speech_queue.put(content)

def extract_chinese_and_convert_to_pinyin(input_string: str) -> str:
    """
    Keep only the CJK characters of *input_string* and return their
    space-separated pinyin (tone-less NORMAL style).
    """
    hanzi_only = ''.join(re.findall(r'[\u4e00-\u9fa5]', input_string))
    syllables = pinyin(hanzi_only, style=Style.NORMAL)
    return ' '.join(entry[0] for entry in syllables)


def welcome_message():
    """Print a colored, blinking ASCII cat to signal the dialog system is ready."""
    cyan, white = "\033[1;96m", "\033[1;97m"
    blink, reset = "\033[5m", "\033[0m"

    border = f"{cyan}    {'-' * 30}{reset}\n"
    cat_lines = [
        "              /\\_____/\\",
        "             /  o   o  \\",
        "            ( ==  ^  == )",
        "             )----O----(",
        "            (           )",
        "           ( (  )   (  ) )",
        "          (__(__)___(__)__)",
    ]
    ascii_art = "\n".join(cat_lines) + "\n"
    print(border)
    print(cyan + blink + ascii_art + reset)
    print(border)
    print(f"{white}    喵喵喵!!{reset}\n")


def calibrate(stream, calibration_seconds=2, chunk_duration_ms=30) -> float:
    """
    Estimate an adaptive amplitude threshold from background noise.

    Records *calibration_seconds* of audio, measures each chunk's mean
    absolute amplitude, and returns mean + 2*std as the speech gate.
    """
    print("开始校准背景噪音，请保持安静...")
    num_frames = int(calibration_seconds * (1000 / chunk_duration_ms))
    amplitudes = [
        np.abs(np.frombuffer(
            stream.read(CHUNK_SIZE, exception_on_overflow=False),
            dtype=np.int16,
        )).mean()
        for _ in range(num_frames)
    ]
    mean_noise = np.mean(amplitudes)
    std_noise = np.std(amplitudes)
    amplitude_threshold = mean_noise + 2 * std_noise
    print(f"校准完成：噪音均值={mean_noise:.2f}，标准差={std_noise:.2f}，设置阈值={amplitude_threshold:.2f}")
    return amplitude_threshold


# =================== Speech detection class ===================
class SpeechDetector:
    """
    Streaming speech detector over 30 ms PCM16 mono chunks.

    Combines three per-chunk tests (amplitude gate, webrtcvad, spectral-peak
    analysis) to drive a speech/silence state machine, buffers the audio of
    the current utterance, and transcribes it with the ASR model once enough
    trailing silence is observed.
    """
    def __init__(self, amplitude_threshold: float,
                 required_speech_frames: int = 2,
                 required_silence_frames: int = 15,
                 long_silence_frames: int = 67):
        """
        :param amplitude_threshold: mean-amplitude gate; falsy values fall back
            to DEFAULT_AMPLITUDE_THRESHOLD at detection time
        :param required_speech_frames: consecutive speech frames that open an
            utterance (default mirrors REQUIRED_SPEECH_FRAMES, ~60 ms)
        :param required_silence_frames: consecutive silence frames that close an
            utterance (default mirrors REQUIRED_SILENCE_FRAMES, ~450 ms)
        :param long_silence_frames: consecutive silence frames reported as the
            long-silence signal (default mirrors LONG_SILENCE_FRAMES, ~2 s)
        """
        self.amplitude_threshold = amplitude_threshold
        self.required_speech_frames = required_speech_frames
        self.required_silence_frames = required_silence_frames
        self.long_silence_frames = long_silence_frames
        self.speech_buffer = bytearray()    # raw audio of the current utterance
        self.speech_state = False           # True while inside an utterance
        self.consecutive_speech = 0         # current run length of speech frames
        self.consecutive_silence = 0        # current run length of silence frames

    def analyze_spectrum(self, audio_chunk: bytes) -> bool:
        """
        Return True if the chunk's magnitude spectrum looks speech-like,
        i.e. it has at least 3 local peaks exceeding 1.5x the spectral mean.
        """
        audio_data = np.frombuffer(audio_chunk, dtype=np.int16)
        if audio_data.size == 0:
            return False

        window = np.hanning(len(audio_data))
        spectrum = np.abs(np.fft.rfft(audio_data * window))
        spectral_mean = np.mean(spectrum)

        # Vectorized local-peak count (replaces the original Python loop):
        # an interior bin is a peak if it exceeds both neighbors and the
        # 1.5x-mean floor.
        interior = spectrum[1:-1]
        peaks = (
            (interior > spectrum[:-2])
            & (interior > spectrum[2:])
            & (interior > spectral_mean * 1.5)
        )
        return int(np.count_nonzero(peaks)) >= 3

    def is_speech(self, audio_chunk: bytes) -> bool:
        """
        Return True only if the chunk passes all three tests: amplitude gate,
        webrtcvad decision, and spectral analysis.
        """
        threshold = self.amplitude_threshold or DEFAULT_AMPLITUDE_THRESHOLD
        audio_data = np.frombuffer(audio_chunk, dtype=np.int16)
        # Cheap energy gate rejects most silence before the heavier checks.
        if np.abs(audio_data).mean() < threshold:
            return False

        return vad.is_speech(audio_chunk, AUDIO_RATE) and self.analyze_spectrum(audio_chunk)

    def sound2text(self, audio_data: bytes) -> str:
        """
        Transcribe raw PCM16 audio to text with the SenseVoice ASR model.
        """
        res = sound_recognition_model.generate(
            input=audio_data,
            cache={},
            language="zh",
            use_itn=True,
            batch_size_s=60,
            merge_vad=True,
            merge_length_s=15,
        )
        return rich_transcription_postprocess(res[0]["text"])

    def process_chunk(self, audio_chunk: bytes):
        """
        Feed one audio chunk into the state machine.

        :return: (recognized_text, audio_data)
            - ("长时间无语音", None) once per long silent stretch
            - (text_or_None, utterance_bytes) when an utterance just ended
            - (None, None) otherwise
        """
        if self.is_speech(audio_chunk):
            self.consecutive_speech += 1
            self.consecutive_silence = 0
            if not self.speech_state and self.consecutive_speech >= self.required_speech_frames:
                self.speech_state = True
            if self.speech_state:
                self.speech_buffer.extend(audio_chunk)
            return None, None

        self.consecutive_silence += 1
        self.consecutive_speech = 0

        if self.consecutive_silence >= self.long_silence_frames:
            # Bug fix: reset the silence counter (and drop any stale state) so
            # the long-silence signal fires once per silent stretch instead of
            # on every subsequent silent chunk.
            self.consecutive_silence = 0
            self.speech_state = False
            self.speech_buffer = bytearray()
            return "长时间无语音", None

        if self.speech_state and self.consecutive_silence >= self.required_silence_frames:
            # Utterance ended: include this trailing chunk, transcribe if the
            # buffer holds more than ~5 chunks of audio, then reset.
            self.speech_buffer.extend(audio_chunk)
            print(f"\n======采集到的语音数据长度：{len(self.speech_buffer)}")
            recognized_text = None
            if len(self.speech_buffer) > CHUNK_SIZE * 5:
                recognized_text = self.sound2text(bytes(self.speech_buffer))
            self.speech_state = False
            audio_data = self.speech_buffer
            self.speech_buffer = bytearray()
            return recognized_text, audio_data

        return None, None


def ASR_API(stream, detector: SpeechDetector):
    """
    Generator that keeps reading the microphone stream, feeding chunks to the
    detector, and yielding every non-empty recognition result.
    :yield: (recognized_text, audio_data)
    """
    while True:
        chunk = stream.read(CHUNK_SIZE, exception_on_overflow=False)
        text, audio = detector.process_chunk(chunk)
        if text:
            yield text, audio


# =================== Interrupt-listener thread ===================
def ds_interrupt_listener(stream, detector, interrupt_key_word_pinyin, interrupt_event):
    """
    Poll the shared audio stream without blocking and set *interrupt_event*
    as soon as the interrupt keyword is heard, so the DS reply loop can stop.
    """
    while not interrupt_event.is_set():
        # Non-blocking read: skip until a full chunk is buffered.
        if stream.get_read_available() < CHUNK_SIZE:
            time.sleep(0.01)
            continue

        try:
            audio_chunk = stream.read(CHUNK_SIZE, exception_on_overflow=False)
        except Exception as e:
            print(f"读取音频数据时出错: {e}")
            continue
        print(f"时刻准备打断deepseek的输出！！!")
        recognized_text, _ = detector.process_chunk(audio_chunk)
        if not recognized_text:
            continue
        if interrupt_key_word_pinyin in extract_chinese_and_convert_to_pinyin(recognized_text):
            print("================================识别到关键词，打断DS大模型输出===================================")
            interrupt_event.set()
            break


# =================== Main entry point ===================
def main():
    """
    Program entry point: open the microphone stream, start the TTS worker,
    then loop forever — (placeholder) speaker verification, ASR, streamed
    DeepSeek reply with keyword interruption — until Ctrl+C.
    """
    p = pyaudio.PyAudio()
    stream = p.open(
        format=pyaudio.paInt16,
        channels=1,
        rate=AUDIO_RATE,
        input=True,
        frames_per_buffer=CHUNK_SIZE
    )

    global tts_thread, stop_flag
    # Start the dedicated speech-synthesis worker thread
    tts_thread = threading.Thread(target=tts_worker, daemon=True)
    tts_thread.start()


    # Optional: calibrate against background noise (or keep the preset threshold)
    amplitude_threshold = DEFAULT_AMPLITUDE_THRESHOLD  # or: amplitude_threshold = calibrate(stream)
    detector = SpeechDetector(amplitude_threshold=amplitude_threshold)
    key_word_pinyin = "ni hao xiao bai"  # example wake keyword: 你好小白
    interrupt_key_word_pinyin = "zan ting"  # interrupt keyword: 暂停 ("pause")

    print("开始监听，请开始说话...(按 Ctrl+C 停止)")
    try:
        while True:


            print("============================开始新的一轮监听===============================")

            print("等待说话人验证...")
            time.sleep(2)
            print("验证成功")
            # =====================================================================================================#
            # Example: speaker verification (commented out; enable as needed)
            # for chinese_string, audio_byte_data in ASR_API(stream, detector):
            #     print("识别结果:", chinese_string)
            #     chinese_pinyin = extract_chinese_and_convert_to_pinyin(chinese_string)
            #     if key_word_pinyin in chinese_pinyin:
            #         print("================================识别到关键词，开始识别===================================")
            #         recongnize_result = sound_verifier_model(
            #             ["person_recording_1.wav", np.frombuffer(audio_byte_data, dtype=np.int16)]
            #         )
            #         print("声纹识别结果:", recongnize_result)
            #         if recongnize_result["score"] > 0.6:
            #             print("===================================声纹识别成功，开始对话===================================")
            #             welcome_message()
            #             time.sleep(5)
            #             break
            # =====================================================================================================#

            # =====================================================================================================#
            # Dialog loop after verification; a long silence breaks back to re-verification
            for recognized_text, _ in ASR_API(stream, detector):
                if recognized_text == "长时间无语音":
                    print("====识别结果: 长时间无语音，中断识别!")
                    break
                elif recognized_text.strip() == "":
                    print("====识别结果为空，跳过识别!")
                    continue
                else:
                    # Fetch the DeepSeek reply as a streaming generator
                    deepseek_response_generator = get_deepseek_response(recognized_text)

                    # NOTE(review): an earlier comment claimed the audio stream is
                    # reopened here for interruptible output, but no reopen occurs —
                    # the same `stream` is shared with the interrupt thread; verify.

                    # Event used to signal interruption between threads
                    interrupt_event = threading.Event()
                    # Start the thread that listens for the interrupt keyword
                    interrupt_thread = threading.Thread(
                        target=ds_interrupt_listener,
                        args=(stream, detector, interrupt_key_word_pinyin, interrupt_event)
                    )
                    interrupt_thread.daemon = False
                    interrupt_thread.start()

                    # Stream the DeepSeek output, checking for interrupt requests
                    for deepseek_response_text in deepseek_response_generator:
                        if interrupt_event.is_set():
                            print("中断DS输出响应，退出DS回复循环。")
                            break
                        # Speak each fragment (non-blocking) and echo it to stdout
                        talkContent(deepseek_response_text)
                        print("DS大模型输出结果:", deepseek_response_text)

                    # Tell the interrupt thread to stop, then wait for it to exit
                    interrupt_event.set()
                    interrupt_thread.join()




                print("\n")
            print("\n")
            # =====================================================================================================#
            # Extension point: re-verify the speaker, handle other commands, etc.

    except KeyboardInterrupt:
        print("检测到 Ctrl+C，退出程序。")
    finally:
        stop_flag = True
        tts_thread.join()
        stream.stop_stream()
        stream.close()
        p.terminate()


# Run the assistant only when executed as a script
if __name__ == "__main__":
    main()
