import io
import logging
import os
import shutil
import tempfile
import threading
import pyttsx3
import vosk
import json
import sounddevice as sd
import re


class TTSManager:
    """Thread-safe text-to-speech playback with spoken-command interruption.

    While an utterance plays, a background Vosk recognizer listens on the
    default microphone and interrupts synthesis when it hears "停止" (stop).
    """

    _engine_lock = threading.Lock()   # serializes access to the pyttsx3 engine
    _current_engine = None            # engine of the utterance in progress, if any
    _stop_event = threading.Event()   # signals the playback loop to abort

    @classmethod
    def speak(cls, text):
        """Speak *text* asynchronously; returns immediately.

        The actual synthesis runs on a daemon thread; a second daemon
        thread listens for the spoken stop command.
        """
        def _speak_task():
            with cls._engine_lock:
                try:
                    engine = pyttsx3.init()
                    cls._current_engine = engine
                    engine.setProperty('rate', 150)
                    engine.setProperty('volume', 1.0)

                    def listen_for_stop():
                        # Recognize microphone audio with Vosk while the
                        # utterance is playing, looking for the stop word.
                        model_path = "vosk-model-small-cn-0.22"
                        vosk_model = vosk.Model(model_path)
                        recognizer = vosk.KaldiRecognizer(vosk_model, 16000)

                        # Record via sounddevice (16 kHz, mono, 16-bit).
                        with sd.InputStream(
                                samplerate=16000,
                                channels=1,
                                dtype='int16',
                                blocksize=4096  # matches the read size below
                        ) as stream:
                            while not cls._stop_event.is_set():
                                # read() yields a numpy array; Vosk wants
                                # the raw little-endian PCM bytes.
                                data, _ = stream.read(4096)
                                audio_bytes = data.tobytes()

                                if recognizer.AcceptWaveform(audio_bytes):
                                    result = json.loads(recognizer.Result())
                                    command = result.get("text", "").strip()
                                    if "停止" in command:
                                        print("检测到停止命令，中断语音合成...")
                                        cls.stop_speaking()
                                        break

                    microphone_thread = threading.Thread(target=listen_for_stop, daemon=True)
                    microphone_thread.start()

                    engine.say(text)
                    # Drive the engine with an external loop so we can poll
                    # the stop event between iterations.
                    engine.startLoop(False)
                    while engine.isBusy() and not cls._stop_event.is_set():
                        # BUG FIX: in external-loop mode iterate() must be
                        # pumped repeatedly (the original called it once),
                        # and sd.sleep() takes *milliseconds* — the original
                        # sd.sleep(0.1) was effectively a busy spin.
                        engine.iterate()
                        sd.sleep(100)

                    engine.endLoop()
                    engine.stop()
                    cls._current_engine = None
                except Exception as e:
                    print(f"语音合成失败: {str(e)}")
                finally:
                    # Always reset so the next speak() starts clean.
                    cls._stop_event.clear()

        threading.Thread(target=_speak_task, daemon=True).start()

    @classmethod
    def stop_speaking(cls):
        """Request interruption of any in-progress utterance."""
        # BUG FIX: set the event *before* taking the lock.  _speak_task
        # holds _engine_lock for the whole utterance, so acquiring the lock
        # first deadlocked until playback finished on its own — stopping
        # never actually worked.  With the event set first, the playback
        # loop exits promptly and releases the lock.
        cls._stop_event.set()
        with cls._engine_lock:
            if cls._current_engine is not None:
                cls._current_engine.stop()
                cls._current_engine = None
                print("语音合成已中断")

    @classmethod
    def save_to_file(cls, text, tmp_file):
        """Synthesize *text* into *tmp_file* (blocking) and return its path."""
        engine = pyttsx3.init()
        engine.setProperty('rate', 150)
        engine.setProperty('volume', 1.0)
        engine.save_to_file(text, tmp_file)
        engine.runAndWait()
        engine.stop()
        return tmp_file

    @classmethod
    def text_to_stream(cls, text: str) -> bytes:
        """Synthesize *text* and return the WAV bytes (b"" on failure)."""
        temp_dir = None
        try:
            # Synthesize into a private temp directory, then slurp the file.
            temp_dir = tempfile.mkdtemp()
            temp_path = os.path.join(temp_dir, "temp.wav")

            engine = pyttsx3.init()
            engine.setProperty('rate', 150)
            engine.setProperty('volume', 1.0)
            engine.save_to_file(text, temp_path)
            engine.runAndWait()

            with open(temp_path, "rb") as f:
                audio_data = f.read()
            return audio_data
        except Exception as e:
            logging.error(f"语音生成失败: {str(e)}")
            return b""
        finally:
            # BUG FIX: remove the temp dir even when synthesis fails —
            # the original only cleaned up on the success path and leaked
            # a directory per failure.
            if temp_dir is not None:
                shutil.rmtree(temp_dir, ignore_errors=True)
            if 'engine' in locals():
                engine.stop()

class STTManager:
    """Speech-to-text transcription backed by a Vosk model."""

    CHUNK_SIZE = 4000  # recommended Vosk chunk size (bytes)

    def __init__(self, vosk_model_path):
        # Load the Vosk model once; recognizers are created per request.
        self.vosk_model = vosk.Model(vosk_model_path)
        self.sample_rate = 16000

    # BUG FIX: this was decorated @staticmethod while declaring and using
    # `self` — calling inst.speech_to_text(audio) bound `audio` to `self`
    # and crashed on self.vosk_model.  It is a plain instance method.
    def speech_to_text(self, audio_data):
        """Transcribe raw PCM *audio_data* and return post-processed text.

        Returns a fixed Chinese error message if recognition fails.
        """
        recognizer = vosk.KaldiRecognizer(self.vosk_model, self.sample_rate)

        try:
            recognizer.AcceptWaveform(audio_data)
            result = json.loads(recognizer.FinalResult())
            # Vosk inserts spaces between Chinese tokens; strip them before
            # the text clean-up pass.
            rep_rs = result.get("text", "").replace(" ", "")
            pro_rs = process_speech_text(rep_rs)
            return pro_rs
        except Exception as e:
            print(f"Error: {e}")
            return "生成语音文本时出错，请稍后再试。"


def process_speech_text(text):
    """Clean up a raw speech-recognition transcript.

    Three passes: drop filler words, correct known mis-recognitions,
    expand known truncations; finally collapse whitespace runs.

    Args:
        text: raw transcript string (typically contiguous Chinese with
            spaces already stripped by the caller).

    Returns:
        The cleaned transcript string.
    """
    # Filler words to drop entirely.
    FILTER_WORDS = ['嗯', '啊', '哦', '呃', '那个', '这个']
    # Known mis-recognitions -> corrections.
    CORRECTION_DICT = {
        '吸气学习': '机器学习',
        '到模型': '大模型',
        # add more correction pairs here
    }
    # Known truncations -> full phrases.
    COMPLETION_DICT = {
        '度学': '深度学习',
        # add more completion pairs here
    }

    # 1. Drop filler words.
    # BUG FIX: the original wrapped each word in regex \b word boundaries,
    # but CJK characters count as word characters in Python's Unicode re,
    # so \b never matches between two Chinese characters — the fillers were
    # never removed from contiguous Chinese text (the normal input, since
    # spaces are stripped upstream).  Plain substring removal is what the
    # code intends.
    filtered_text = text
    for word in FILTER_WORDS:
        filtered_text = filtered_text.replace(word, '')

    # 2. Fix mis-recognized phrases.
    corrected_text = filtered_text
    for wrong, correct in CORRECTION_DICT.items():
        corrected_text = corrected_text.replace(wrong, correct)

    # 3. Expand truncated phrases.
    completed_text = corrected_text
    for incomplete, complete in COMPLETION_DICT.items():
        completed_text = completed_text.replace(incomplete, complete)

    # Collapse runs of spaces/newlines into single spaces.
    cleaned_text = ' '.join(completed_text.split())

    return cleaned_text
