import json
import os
import time
from typing import Optional

import numpy as np
import pyttsx3
from fastapi import WebSocket
from vosk import Model, KaldiRecognizer

from biz.config.settings import ASRConfig
from biz.integrations.aliyun.speech_recognition_api import recognize_audio
from biz.integrations.funasr.funasr_api import FunasrApi
from biz.utils.logger import logger

class VoiceAssistant:
    """WebSocket-based voice assistant.

    Wake-word detection runs through a streaming FunASR connection (with an
    optional local VOSK model as an alternative path); once awake, audio is
    buffered and sent to the Aliyun recognition API for command
    transcription. Status dicts are returned to (and pushed over) the
    provided WebSocket.
    """

    def __init__(self, websocket: WebSocket, model_path=ASRConfig.VOSK_MODEL_PATH, wake_words=ASRConfig.WAKE_WORDS):
        """Initialize recognizers, the TTS engine and per-session state.

        Args:
            websocket: connected FastAPI WebSocket used to push status events.
            model_path: filesystem path to the VOSK model directory; a missing
                or broken model falls back to the FunASR-only path
                (``self.model``/``self.recognizer`` stay ``None``).
            wake_words: one wake word or a list of them, matched
                case-insensitively as substrings of the recognized text.
        """
        self.websocket = websocket
        try:
            if not os.path.exists(model_path):
                logger.warning(f"VOSK模型路径不存在: {model_path}，将使用备用方案")
                self.model = None
                self.recognizer = None
            else:
                self.model = Model(model_path)
                # VOSK recognizer is configured for 16 kHz PCM input.
                self.recognizer = KaldiRecognizer(self.model, 16000)
                logger.info(f"VOSK模型加载成功: {model_path}")
        except Exception as e:
            logger.error(f"VOSK模型初始化失败: {e}，将使用备用方案")
            self.model = None
            self.recognizer = None

        # Normalize to a lowercase list so wake-word matching is uniform.
        self.wake_words = [word.lower() for word in (wake_words if isinstance(wake_words, list) else [wake_words])]
        self.engine = pyttsx3.init()
        self.PROCESS_INTERVAL = ASRConfig.PROCESS_INTERVAL
        self.SILENCE_THRESHOLD = ASRConfig.SILENCE_THRESHOLD
        self.SILENCE_DURATION = ASRConfig.SILENCE_DURATION
        self.rcg = FunasrApi(uri=os.getenv('FUNASR_URL'), keep_alive=True, msg_callback=self.default_msg_callback)
        self.wake_word_detected = False
        self._init_state()
        self._setup_voice()
        logger.info("初始化语音助手")

    def _init_state(self):
        """Reset all per-conversation state to the not-awake defaults."""
        self.is_waked = False              # True once a wake word was heard
        self.audio_buffer = bytearray()    # accumulated post-wake command audio
        self.last_process_time = None      # timestamp when buffering started
        self.last_active_time = None       # last chunk above the silence threshold
        self.waiting_message_sent = False  # "listening" notice is sent only once

    def _setup_voice(self):
        """Select the first Chinese TTS voice, if the engine offers one."""
        voices = self.engine.getProperty('voices')
        for voice in voices:
            if 'chinese' in voice.name.lower():
                self.engine.setProperty('voice', voice.id)
                break

    def default_msg_callback(self, msg):
        """Handle streaming recognition messages from the FunASR server.

        Sets the wake flags when any configured wake word appears in the
        recognized text; ``process_audio`` later reports the wake-up to the
        client over the WebSocket.

        Args:
            msg: message dict from the server; the recognized text is
                expected under the ``'text'`` key.
        """
        try:
            logger.info(f"回调识别结果: {msg}")
            # Fix: use .get() so a message without a 'text' key is ignored
            # instead of raising KeyError.
            if msg.get('text') is not None:
                asr_text = self._remove_punctuation(msg['text'])
                # Check whether any wake word occurs in the recognized text.
                if any(wake_word in asr_text for wake_word in self.wake_words):
                    logger.info("检测到唤醒词！")
                    self.is_waked = True
                    self.wake_word_detected = True
        except Exception as e:
            # Fix: was print(); route through the shared logger like the rest
            # of this class so the error is not lost outside a console run.
            logger.error(f"处理消息时出错: {e}")

    async def process_audio(self, audio_data: bytes) -> Optional[dict]:
        """Route one incoming audio chunk to wake-word or command handling.

        Returns a status dict for the client, or None when there is nothing
        new to report (still listening silently / still buffering).
        """
        try:
            # The FunASR callback (default_msg_callback) may have flagged a
            # wake word asynchronously; report it once and reset the flag.
            if self.wake_word_detected:
                self.wake_word_detected = False
                await self.websocket.send_json({"status": "wake_word", "message": "唤醒词已触发"})
            if not self.is_waked:
                return self._process_wake_word_asr(audio_data)
            return await self._process_command_audio(audio_data)
        except Exception as e:
            logger.error(f"处理音频时发生错误: {str(e)}")
            return {"status": "error", "message": f"处理失败: {str(e)}"}

    def _process_wake_word(self, audio_data: bytes) -> Optional[dict]:
        """Detect a wake word locally with VOSK (alternative to the FunASR path).

        Returns a wake-word status dict on detection, one "listening" notice
        the first time through, and None otherwise.
        """
        # Fix: guard against self.recognizer being None — __init__ falls back
        # to None when the VOSK model is missing or fails to load, and this
        # call previously raised AttributeError in that case.
        if self.recognizer is not None and self.recognizer.AcceptWaveform(audio_data):
            result = json.loads(self.recognizer.Result())
            vosk_text = result.get("text", "").lower()
            if vosk_text:
                # VOSK separates tokens with spaces; strip them so substring
                # matching against the configured wake words works.
                vosk_text = vosk_text.replace(" ", "")
                logger.info(f"Vosk识别结果: {vosk_text}")

                if any(wake_word in vosk_text for wake_word in self.wake_words):
                    logger.info("检测到唤醒词！")
                    self.is_waked = True
                    self.waiting_message_sent = False
                    return {
                        "status": "wake_word",
                        "message": "唤醒词已触发"
                    }
        # Send the waiting notice only once per listening session.
        if not self.waiting_message_sent:
            self.waiting_message_sent = True
            return {"status": "listening", "message": "正在等待唤醒"}
        return None

    def _remove_punctuation(self, text):
        """Return *text* with every ASRConfig.PUNCTUATION entry removed."""
        for punct in ASRConfig.PUNCTUATION:
            text = text.replace(punct, "")
        return text

    def _process_wake_word_asr(self, audio_data: bytes) -> Optional[dict]:
        """Stream audio to FunASR for wake-word detection.

        Detection itself happens asynchronously in default_msg_callback;
        this method only feeds the recognizer and emits one "listening"
        notice per session (None on subsequent calls).
        """
        self.rcg.rec_buf(audio_data)
        # Send the waiting notice only once per listening session.
        if not self.waiting_message_sent:
            self.waiting_message_sent = True
            return {"status": "listening", "message": "正在等待唤醒"}
        return None

    async def _asr_close(self):
        """Close the streaming FunASR connection."""
        self.rcg.funasr_close()

    async def _process_command_audio(self, audio_data: bytes) -> Optional[dict]:
        """Buffer post-wake audio and transcribe it once speech ends.

        Audio accumulates until either SILENCE_DURATION seconds of silence or
        PROCESS_INTERVAL seconds in total elapse, then the whole buffer is
        sent to the Aliyun recognition API. Returns None while still
        buffering; otherwise a command/no_speech/error status dict.
        """
        try:
            # First chunk of a new utterance: start both timers.
            if self.last_process_time is None:
                self.last_process_time = time.time()
                self.last_active_time = time.time()

            current_time = time.time()
            # Interpret the chunk as 16-bit PCM samples to estimate volume.
            chunk_data = np.frombuffer(audio_data, dtype=np.int16)
            # Fix: np.mean of an empty array is NaN (plus a RuntimeWarning),
            # which can never satisfy the threshold test — treat an empty
            # chunk as silence instead.
            volume = float(np.mean(np.abs(chunk_data))) if chunk_data.size else 0.0
            # Append the new audio to the utterance buffer.
            self.audio_buffer.extend(audio_data)

            # Any chunk at/above the threshold counts as speech activity.
            if volume >= self.SILENCE_THRESHOLD:
                self.last_active_time = current_time

            # Flush when silence lasted long enough, or on the hard time cap.
            silence_duration = current_time - self.last_active_time
            total_duration = current_time - self.last_process_time

            if (silence_duration >= self.SILENCE_DURATION) or (total_duration >= self.PROCESS_INTERVAL):
                # Transcribe the buffered utterance.
                text = recognize_audio(bytes(self.audio_buffer), num_processes=1, timeout=30)
                logger.info(f"FunASR识别结果: {text}")

                # Reset buffering state for the next utterance.
                self.audio_buffer = bytearray()
                self.last_process_time = None
                self.last_active_time = None

                if not text:
                    return {"status": "no_speech", "message": "未检测到语音"}

                text = text.replace(" ", "")
                response = self.process_command(text)
                # Go back to sleep after handling one command.
                self.is_waked = False
                return {
                    "status": "command",
                    "message": response,
                    "text": text
                }
            # Still buffering — nothing to report yet.
            return None

        except Exception as e:
            logger.error(f"处理音频时发生错误: {str(e)}")
            return {"status": "error", "message": f"处理失败: {str(e)}"}

    def process_command(self, command: str) -> str:
        """Map a recognized command string to an action token.

        Returns "GOOD_BYE" for a farewell; otherwise the literal string
        "None" (the client protocol expects a string, not the None object).
        """
        logger.info(f"处理命令: {command}")
        if "再见" in command:
            return "GOOD_BYE"
        # Disabled routing branches kept for reference:
        # elif "调光" in command:
        #     return "/pages/web/chatOperationWeb"
        # elif "导航" in command:
        #     return "/pages/web/chatNavigateWeb"
        # elif "知识库" in command:
        #     return "/pages/web/chatKnowWeb"
        # elif "数据分析" in command:
        #     return "/pages/web/chatQueryWeb"
        else:
            return "None"