import json
import queue
import threading
import uuid
from abc import ABC, abstractmethod
import logging
from concurrent.futures import ThreadPoolExecutor, TimeoutError
import argparse
import time

# Import required project modules
from core import recording, vad, asr, audio_player, llm, chatCharacter, tts, memory
from core.prompt import sys_prompt
from core.chatCharacter import chatCharacter
from core.dialogue import Message, Dialogue
from utils.utils import (
    is_interrupt,
    readConfig,
    readJson,
    is_segment,
    extract_json_from_string,
    is_segment_sentence
)

from core.registry import Action
from core.task_manager import TaskManager

# Configure module-level logger
logger = logging.getLogger(__name__)


class VoiceAssistant(ABC):
    """Voice assistant base class implementing the full voice-interaction pipeline.

    Pipeline: recording -> VAD -> ASR -> LLM (optionally with tool calls)
    -> TTS -> audio playback.  Audio frames flow through thread-safe queues;
    long-running work is dispatched to a thread pool so the main loop stays
    responsive to barge-in interruptions.
    """

    def __init__(self, config_path, websocket=None, event_loop=None, character_id=None):
        """
        Initialize the voice assistant.

        Args:
            config_path: Path to the configuration file.
            websocket: Optional WebSocket connection object (websocket player mode).
            event_loop: Optional event loop object (websocket player mode).
            character_id: Optional chat-character id; falls back to "default".
        """
        # Load configuration
        self.config = readConfig(config_path)

        # Raw audio frames from the recorder, consumed by the VAD worker thread.
        self.audio_buffer = queue.Queue()

        # Character management must be ready before the audio/language modules,
        # which read the character's voice and system prompt.
        self.chatCharacter = chatCharacter()
        self.cur_Character = character_id or "default"

        # Initialize audio-related modules (recording, ASR, TTS, VAD, player).
        self._init_audio_modules()

        # Resolve the system prompt for the selected character.  Start from the
        # default prompt so self.system_prompt is ALWAYS defined, even when the
        # character lookup returns nothing (the original code left it unset in
        # that case and relied on a late hasattr() patch-up).
        self.system_prompt = sys_prompt
        if self.cur_Character != "default":
            character_info = self.chatCharacter.get_character(self.cur_Character)
            if character_info:
                self.system_prompt = character_info['system_prompt']

        # Language modules depend on self.system_prompt being set.
        self._init_language_modules()
        self._init_control_components()

        # WebSocket player mode: wire the connection into the audio output and
        # mirror dialogue messages back over the socket.
        if self.config["choice"]["player"].lower().find("websocket") > -1:
            self.audio_output.init(websocket, event_loop)
            self.set_dialogue_handler(self.audio_output.send_messages)

    def _init_audio_modules(self):
        """Initialize the audio-related modules: recorder, ASR, TTS, VAD, player."""
        # Recording module
        self.recording = recording.create_instance(
            self.config["choice"]["recording"],
            self.config["recording"][self.config["choice"]["recording"]]
        )

        # Speech-recognition (ASR) module
        self.speech_to_text = asr.create_instance(
            self.config["choice"]["asr"],
            self.config["asr"][self.config["choice"]["asr"]]
        )

        # Select the speaker embedding (voice) for the current character.
        tts_config = self.config["tts"][self.config["choice"]["tts"]]
        tts_config["spk_emb_file"] = self.chatCharacter.get_speaker(self.cur_Character)
        logger.info(f"使用角色 {self.cur_Character} 的音色: {tts_config}")

        # Text-to-speech module
        self.text_to_speech = tts.create_instance(
            self.config["choice"]["tts"],
            tts_config
        )

        # Voice-activity-detection module
        self.vad_processor = vad.create_instance(
            self.config["choice"]["vad"],
            self.config["vad"][self.config["choice"]["vad"]]
        )

        # Audio playback module
        self.audio_output = audio_player.create_instance(
            self.config["choice"]["player"],
            self.config["player"][self.config["choice"]["player"]]
        )

        # Queue of per-frame VAD results consumed by the main loop.
        self.vad_buffer = queue.Queue()

    def _init_language_modules(self):
        """Initialize the LLM engine, long-term memory and dialogue history."""
        # Language model
        self.llm_engine = llm.create_instance(
            self.config["choice"]["llm"],
            self.config["llm"][self.config["choice"]["llm"]]
        )

        # Long-term memory store whose content is injected into the prompt.
        self.conversation_store = memory.Memory(self.config.get("memory"))

        # Defensive fallback: __init__ always sets self.system_prompt, but this
        # method could be invoked on a partially constructed instance.  Uses the
        # module-level sys_prompt (no need to re-import it here).
        system_prompt = getattr(self, 'system_prompt', sys_prompt)
        system_prompt = system_prompt.replace("{memory}", self.conversation_store.get_memory()).strip()

        # Dialogue history, seeded with the resolved system prompt.
        self.dialogue_history = Dialogue(self.config["memory"]["dialogue_history_path"])
        self.dialogue_history.put(Message(role="system", content=system_prompt))

    def _init_control_components(self):
        """Initialize the thread pool, task queues and shared state flags."""
        # Thread pool for parallel LLM / TTS / conversation work.
        self.execution_pool = ThreadPoolExecutor(max_workers=10)

        # Futures of pending TTS jobs, played back strictly in submit order.
        self.tts_processing_queue = queue.Queue()

        # Background task queue fed by the task manager.
        self.background_task_queue = queue.Queue()
        self.task_coordinator = TaskManager(self.config.get("taskManager"), self.background_task_queue)

        # State flags shared between the main loop and worker threads.
        self.is_recording_active = True
        self.is_voice_active = True
        self.interrupt_enabled = self.config["interrupt"]
        # Number of 512-sample frames in ~1 s of 16 kHz audio (31).
        # NOTE(review): despite the name this is a frame count, not
        # milliseconds — confirm before renaming.
        self.silence_duration_ms = int((1000 / 1000) * (16000 / 512))

        # Thread-safe control primitives.
        self.conversation_in_progress = False
        self.stop_signal = threading.Event()
        self.dialogue_callback = None

        # Buffered VAD frames for the utterance currently being spoken.
        self.current_speech_data = []

        # Whether the LLM is driven in function-call (tool) mode.
        self.function_call_mode = self.config.get("StartTaskMode")

    def set_dialogue_handler(self, callback):
        """Register a callback invoked with each user/assistant message dict."""
        self.dialogue_callback = callback

    def _process_vad_stream(self):
        """Start the background thread that runs VAD over incoming audio frames."""

        def vad_worker():
            while not self.stop_signal.is_set():
                try:
                    # Timed get so the loop re-checks stop_signal instead of
                    # blocking forever on an idle queue (the original blocked
                    # indefinitely and could never observe shutdown).
                    audio_chunk = self.audio_buffer.get(timeout=0.5)
                except queue.Empty:
                    continue
                try:
                    vad_result = self.vad_processor.is_vad(audio_chunk)
                    self.vad_buffer.put({"audio": audio_chunk, "vad_status": vad_result})
                except Exception as e:
                    logger.error(f"语音活动检测处理错误: {e}")

        vad_thread = threading.Thread(target=vad_worker, daemon=True)
        vad_thread.start()

    def _manage_tts_priority(self):
        """Start the background thread that plays synthesized audio in order."""

        def tts_priority_worker():
            while not self.stop_signal.is_set():
                try:
                    # Timed get so stop_signal is honored while idle.
                    tts_future = self.tts_processing_queue.get(timeout=0.5)
                except queue.Empty:
                    continue
                try:
                    try:
                        audio_file = tts_future.result(timeout=50)
                    except TimeoutError:
                        logger.error("语音合成任务超时")
                        continue
                    except Exception as e:
                        logger.error(f"语音合成任务错误: {e}")
                        continue

                    if audio_file is not None:
                        self.audio_output.play(audio_file)
                except Exception as e:
                    logger.error(f"TTS优先级管理线程错误: {e}")

        tts_thread = threading.Thread(target=tts_priority_worker, daemon=True)
        tts_thread.start()

    def stop_audio_playback(self):
        """Stop the audio that is currently playing (barge-in support)."""
        logger.info("正在中断当前播放")
        self.audio_output.stop()

    def terminate(self):
        """Safely shut down all resources: threads, thread pool and recorder."""
        logger.info("正在关闭语音助手...")
        self.stop_signal.set()
        self.execution_pool.shutdown(wait=True)
        self.recording.stop_recording()
        self.recording.shutdown()
        logger.info("关闭完成")

    def initialize_audio_processing(self):
        """Start recording and launch the VAD and TTS background threads."""
        # Start recording into the shared audio buffer.
        self.recording.startRecording(self.audio_buffer)
        logger.info("录音已启动")

        # Start VAD processing.
        self._process_vad_stream()

        # Start ordered TTS playback management.
        self._manage_tts_priority()

    def _handle_audio_stream(self):
        """Consume one VAD result and drive the utterance state machine.

        Returns:
            True when a complete utterance was dispatched for recognition,
            None otherwise (idle frame, timeout, suppressed speech or error).
        """
        # Timed get so run() can observe stop_signal between frames.
        try:
            vad_data = self.vad_buffer.get(timeout=0.5)
        except queue.Empty:
            return None

        # Collect frames while an utterance is in progress.
        if self.is_voice_active:
            self.current_speech_data.append(vad_data)

        vad_status = vad_data.get("vad_status")

        # Flush one queued background-task response, but only when fully idle
        # (no speech, no boundary event, no playback, no conversation running).
        if (not self.background_task_queue.empty() and not self.is_voice_active and
                vad_status is None and not self.audio_output.get_playing_status() and
                not self.conversation_in_progress):
            task_result = self.background_task_queue.get()
            future = self.execution_pool.submit(self.generate_and_play_speech, task_result.response)
            self.tts_processing_queue.put(future)

        # No speech-boundary event in this frame.
        if vad_status is None:
            return

        # Speech started.
        if "start" in vad_status:

            if self.audio_output.get_playing_status() or self.conversation_in_progress:
                # The user spoke over ongoing output: barge-in handling.
                if self.interrupt_enabled:
                    self.conversation_in_progress = False
                    self.stop_audio_playback()
                    self.is_voice_active = True
                    self.current_speech_data.append(vad_data)
                else:
                    # Interruption disabled: ignore speech until output finishes.
                    return
            else:
                # Normal start of a new utterance.
                self.is_voice_active = True
                self.current_speech_data.append(vad_data)

        # Speech ended: run recognition on the buffered utterance.
        elif "end" in vad_status and len(self.current_speech_data) > 0:
            try:
                logger.debug(f"语音数据片段数量: {len(self.current_speech_data)}")
                self.is_voice_active = False

                # Extract the raw frames and run ASR on them.
                audio_frames = [d["audio"] for d in self.current_speech_data]
                recognized_text, temp_file = self.speech_to_text.recognizer(audio_frames)
                self.current_speech_data = []

                # Skip empty recognition results.
                if not recognized_text.strip():
                    logger.debug("识别结果为空，跳过处理")
                    return

                logger.debug(f"语音识别结果: {recognized_text}")

                # Notify the dialogue callback (e.g. websocket mirror).
                if self.dialogue_callback:
                    self.dialogue_callback({"role": "user", "content": str(recognized_text)})

                # Process the conversation asynchronously.
                self.execution_pool.submit(self.process_conversation, recognized_text)

            except Exception as e:
                # Reset utterance state so the next speech segment starts clean.
                self.is_voice_active = False
                self.current_speech_data = []
                logger.error(f"语音识别错误: {e}")
                return

        return True

    def run(self):
        """Run the assistant main loop until interrupted or stopped."""
        try:
            self.initialize_audio_processing()
            while not self.stop_signal.is_set():
                self._handle_audio_stream()
        except KeyboardInterrupt:
            logger.info("收到中断信号，正在退出...")
        finally:
            self.terminate()

    def generate_and_play_speech(self, text):
        """Synthesize speech for *text*.

        Despite the name this only synthesizes; playback happens in the TTS
        priority worker, which consumes the returned audio file.

        Returns:
            The synthesized audio file, or None when text is empty or TTS fails.
        """
        if not text or len(text) == 0:
            logger.info(f"无需语音合成，文本为空: {text}")
            return None

        audio_file = self.text_to_speech.to_tts(text)
        if audio_file is None:
            logger.error(f"语音合成失败: {text}")
            return None

        logger.debug(f"语音文件生成完成")
        return audio_file

    def process_conversation_with_tools(self, user_query):
        """Run one LLM turn with tool/function-call support.

        Streams the LLM response, speaking complete sentence segments as they
        arrive.  When the model emits a function call (either via the tool-call
        API or as a ```-fenced JSON blob), dispatches it through the task
        coordinator and acts on the resulting Action.  May recurse for actions
        that require another LLM round-trip.

        Returns:
            The list of spoken content chunks ([] for pure tool turns / errors).
        """
        chunk_start = 0
        response_chunks = []

        try:
            start_time = time.time()
            llm_stream = self.llm_engine.response_call(
                self.dialogue_history.get_llm_dialogue(),
                functions_call=self.task_coordinator.get_functions()
            )
        except Exception as e:
            logger.error(f"语言模型处理错误 {user_query}: {e}")
            return []

        is_function_call = False
        function_info = {
            "name": None,
            "id": None,
            "arguments": "",
            "content_args": ""
        }

        # Consume the streamed response chunk by chunk.
        for chunk in llm_stream:
            content, tool_call = chunk

            # A leading ``` fence signals an inline (JSON) function call.
            if content and not response_chunks and content == "```":
                is_function_call = True

            # Accumulate structured tool-call fragments.
            if tool_call:
                is_function_call = True
                if tool_call[0].id:
                    function_info["id"] = tool_call[0].id
                if tool_call[0].function.name:
                    function_info["name"] = tool_call[0].function.name
                if tool_call[0].function.arguments:
                    function_info["arguments"] += tool_call[0].function.arguments

            # Route text either into the function-call buffer or into speech.
            if content and len(content) > 0:
                if is_function_call:
                    function_info["content_args"] += content
                else:
                    response_chunks.append(content)
                    full_response = "".join(response_chunks)
                    end_time = time.time()

                    logger.debug(f"模型响应时间: {end_time - start_time} 秒, 生成token: {content}")

                    # Speak each complete sentence segment as soon as it exists.
                    should_segment, segment_index = is_segment_sentence(full_response, chunk_start)
                    logger.debug(f"分段状态: {should_segment}, 分段索引: {segment_index}")

                    if should_segment:
                        segment_text = full_response[chunk_start:segment_index + 1]
                        # Skip degenerate segments.  NOTE(review): comparing a
                        # length to an index looks suspicious — confirm intent.
                        if len(segment_text) > max(2, chunk_start):
                            future = self.execution_pool.submit(self.generate_and_play_speech, segment_text)
                            self.tts_processing_queue.put(future)
                            chunk_start = segment_index + 1

        # No function call: speak whatever text remains unsegmented.
        if not is_function_call:
            full_response = "".join(response_chunks)
            if chunk_start < len(full_response):
                remaining_text = full_response[chunk_start:]
                future = self.execution_pool.submit(self.generate_and_play_speech, remaining_text)
                self.tts_processing_queue.put(future)
        else:
            # Function call path.
            if function_info["id"] is None:
                # Recover a JSON-formatted call from the fenced content.
                json_content = extract_json_from_string(function_info["content_args"])
                if json_content:
                    try:
                        func_data = json.loads(json_content)
                        function_info["name"] = func_data["function_name"]
                        function_info["arguments"] = json.dumps(func_data["args"], ensure_ascii=False)
                        function_info["id"] = str(uuid.uuid4().hex)
                    except json.JSONDecodeError:
                        return []
                else:
                    return []

            # Parse the accumulated argument string.
            try:
                function_args = json.loads(function_info["arguments"])
            except json.JSONDecodeError:
                return []

            logger.info(
                f"函数调用: {function_info['name']}, ID: {function_info['id']}, "
                f"参数: {function_args}"
            )

            # Execute the tool call.
            tool_result = self.task_coordinator.tool_call(
                function_info["name"],
                function_args
            )

            # Act on the tool result.
            if tool_result.action == Action.NOTFOUND:
                logger.error(f"未找到函数: {function_info['name']}")
                return []
            elif tool_result.action == Action.NONE:
                return []
            elif tool_result.action == Action.RESPONSE:
                # Speak the tool's canned response directly.
                future = self.execution_pool.submit(self.generate_and_play_speech, tool_result.response)
                self.tts_processing_queue.put(future)
                return [tool_result.response]
            elif tool_result.action == Action.REQLLM:
                # Record the call and its result, then ask the LLM again.
                self.dialogue_history.put(Message(
                    role='assistant',
                    tool_calls=[{
                        "id": function_info["id"],
                        "function": {
                            "arguments": json.dumps(function_args, ensure_ascii=False),
                            "name": function_info["name"]
                        },
                        "type": 'function',
                        "index": 0
                    }]
                ))

                self.dialogue_history.put(Message(
                    role="tool",
                    tool_call_id=function_info["id"],
                    content=tool_result.result
                ))

                return self.process_conversation_with_tools(user_query)
            elif tool_result.action == Action.ADDSYSTEM:
                # Inject an extra system message; no spoken output.
                self.dialogue_history.put(Message(**tool_result.result))
                return []
            elif tool_result.action == Action.ADDSYSTEMSPEAK:
                # Record the call and result, inject a system message, then
                # continue the conversation with a synthetic user turn.
                self.dialogue_history.put(Message(
                    role='assistant',
                    tool_calls=[{
                        "id": function_info["id"],
                        "function": {
                            "arguments": json.dumps(function_args, ensure_ascii=False),
                            "name": function_info["name"]
                        },
                        "type": 'function',
                        "index": 0
                    }]
                ))

                self.dialogue_history.put(Message(
                    role="tool",
                    tool_call_id=function_info["id"],
                    content=tool_result.response
                ))

                self.dialogue_history.put(Message(**tool_result.result))
                self.dialogue_history.put(Message(role="user", content="ok"))
                return self.process_conversation_with_tools(user_query)
            else:
                logger.error(f"未知的操作类型: {tool_result.action}")

        return response_chunks

    def process_conversation(self, user_query):
        """Handle one user utterance: query the LLM, stream TTS, update history.

        Returns:
            True on success, None when the LLM call fails.
        """
        # Record the user turn.
        self.dialogue_history.put(Message(role="user", content=user_query))

        response_chunks = []
        chunk_start = 0
        self.conversation_in_progress = True

        try:
            # Choose the processing mode from configuration.
            if self.function_call_mode:
                response_chunks = self.process_conversation_with_tools(user_query)
            else:
                # Plain streaming LLM call.
                try:
                    start_time = time.time()
                    llm_stream = self.llm_engine.response(
                        self.dialogue_history.get_llm_dialogue()
                    )
                except Exception as e:
                    logger.error(f"语言模型处理错误 {user_query}: {e}")
                    return None

                # Consume the streamed response.
                for content in llm_stream:
                    response_chunks.append(content)
                    full_response = "".join(response_chunks)
                    end_time = time.time()

                    logger.debug(f"模型响应时间: {end_time - start_time} 秒, 生成token: {content}")

                    # Speak each complete sentence segment as soon as it exists.
                    should_segment, segment_index = is_segment_sentence(full_response, chunk_start)
                    logger.debug(f"分段状态: {should_segment}, 分段索引: {segment_index}")

                    if should_segment:
                        segment_text = full_response[chunk_start:segment_index + 1]
                        # Skip degenerate segments (see note in tools path).
                        if len(segment_text) > max(2, chunk_start):
                            future = self.execution_pool.submit(self.generate_and_play_speech, segment_text)
                            self.tts_processing_queue.put(future)
                            chunk_start = segment_index + 1

                # Speak whatever text remains unsegmented.
                full_response = "".join(response_chunks)
                if chunk_start < len(full_response):
                    remaining_text = full_response[chunk_start:]
                    future = self.execution_pool.submit(self.generate_and_play_speech, remaining_text)
                    self.tts_processing_queue.put(future)
        finally:
            # Always clear the busy flag — the original left it stuck True if
            # the tools path raised, permanently blocking non-interrupt mode.
            self.conversation_in_progress = False

        # Mirror the assistant turn and persist dialogue history.
        full_response = "".join(response_chunks)
        if self.dialogue_callback:
            self.dialogue_callback({"role": "assistant", "content": full_response})

        self.dialogue_history.put(Message(role="assistant", content=full_response))
        self.dialogue_history.dump_dialogue()

        logger.debug(json.dumps(self.dialogue_history.get_llm_dialogue(), indent=4, ensure_ascii=False))
        return True


if __name__ == "__main__":
    # Parse command-line options for the assistant entry point.
    arg_parser = argparse.ArgumentParser(description="智能语音助手")
    arg_parser.add_argument('--config_path', type=str, help="配置文件", default="config/config.yaml")
    cli_args = arg_parser.parse_args()

    # Build the assistant from the selected configuration and start its loop.
    assistant = VoiceAssistant(cli_args.config_path)
    assistant.run()





