# your_project_name/main_agent.py

import os
import yaml
import sys
import json
import time
import asyncio
import threading
import queue
import logging
import traceback
from pathlib import Path
from typing import TypedDict, Annotated, List, Optional
import operator
from datetime import datetime
from functools import partial

# LangChain/LangGraph core components
from langchain_core.messages import BaseMessage, HumanMessage, AIMessage, ToolMessage
from langgraph.graph import StateGraph, END
from langgraph.prebuilt import ToolNode

# Tool module imports
from tools import initialize_mcp_tools, close_mcp_tools

# Local modules
from tts import StreamingTTS, switch_models, preload_tts
from llm import initialize_llm
from memory import (
    initialize_enhanced_graph_rag, 
    get_enhanced_rag_manager, 
    close_enhanced_graph_rag,
    MemoryType
)

# -----------------------------------
# Configuration and paths
# -----------------------------------
# Project-relative locations for the LLM config, the persisted conversation
# history, and the Graph-RAG memory store.
BASE_DIR = Path(__file__).parent
CONFIG_FILE = BASE_DIR / "configs" / "llm_config.yaml"
HISTORY_DIR = BASE_DIR / "history"
CONTEXT_FILE = HISTORY_DIR / "conversation_history.json"
MEMORY_DIR = BASE_DIR / "memory_storage"

# Ensure the storage directories exist before anything tries to write to them.
HISTORY_DIR.mkdir(exist_ok=True)
MEMORY_DIR.mkdir(exist_ok=True)

# -----------------------------------
# Logging setup
# -----------------------------------
def setup_mcp_debug_logger():
    """Create (or fetch) the dedicated MCP debug logger.

    The logger writes DEBUG-level records to ``mcp_debug_main.log`` in the
    current working directory.  Handler attachment is guarded so repeated
    calls never stack duplicate file handlers.

    Returns:
        logging.Logger: the configured "mcp_debug" logger.
    """
    debug_logger = logging.getLogger("mcp_debug")
    debug_logger.setLevel(logging.DEBUG)

    # logging.getLogger returns a process-wide singleton, so only attach a
    # handler on the first call.
    if not debug_logger.handlers:
        handler = logging.FileHandler(Path("mcp_debug_main.log"), encoding='utf-8')
        handler.setLevel(logging.DEBUG)
        handler.setFormatter(logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(funcName)s:%(lineno)d - %(message)s'
        ))
        debug_logger.addHandler(handler)

    return debug_logger

# Module-level MCP debug logger, shared by the tool node and the main loop.
mcp_debug_logger = setup_mcp_debug_logger()

# -----------------------------------
# Non-blocking user input handling
# -----------------------------------
class NonBlockingInput:
    """Reads console input on a background thread so the main loop never blocks.

    Lines typed by the user are pushed onto an internal queue; callers poll
    them off with get_input().  A separate interrupt flag lets any component
    signal that the in-flight turn should be aborted.
    """

    def __init__(self):
        self.input_queue = queue.Queue()
        self.input_thread = None
        self.stop_event = threading.Event()
        self.interrupt_flag = threading.Event()  # raised to abort the current turn
        self._start_input_thread()

    def _start_input_thread(self):
        """Spawn the daemon thread that blocks on input()."""
        self.input_thread = threading.Thread(target=self._input_worker, daemon=True)
        self.input_thread.start()

    def _input_worker(self):
        """Thread body: read stdin lines until stopped or stdin closes."""
        while not self.stop_event.is_set():
            try:
                line = input("用户: ")
            except EOFError:
                # stdin closed — nothing more to read.
                break
            except KeyboardInterrupt:
                # Ctrl-C in the reader thread: ask the whole app to shut down.
                self.stop_event.set()
                break
            except Exception:
                break
            # Drop the line if shutdown began while we were blocked in input().
            if not self.stop_event.is_set():
                self.input_queue.put(line)

    def get_input(self, timeout: float = 0.1) -> Optional[str]:
        """Return the next queued line, or None if nothing arrives within timeout."""
        try:
            return self.input_queue.get(timeout=timeout)
        except queue.Empty:
            return None

    def clear_queue(self):
        """Drop every line currently buffered in the queue."""
        while True:
            try:
                self.input_queue.get_nowait()
            except queue.Empty:
                break

    def set_interrupt(self):
        """Raise the interrupt flag."""
        self.interrupt_flag.set()

    def clear_interrupt(self):
        """Lower the interrupt flag."""
        self.interrupt_flag.clear()

    def is_interrupted(self):
        """Return True while the interrupt flag is raised."""
        return self.interrupt_flag.is_set()

    def stop(self):
        """Signal the reader thread to exit and wait briefly for it."""
        self.stop_event.set()
        worker = self.input_thread
        if worker is not None and worker.is_alive():
            worker.join(timeout=1)

# -----------------------------------
# History persistence helpers
# -----------------------------------
def load_history(filepath: str) -> List[dict]:
    """Load the persisted conversation history from a JSON file.

    Args:
        filepath: Path to the history JSON file (str or Path).

    Returns:
        The list of history entries, or [] when the file is missing,
        unreadable, contains invalid JSON, or does not hold a JSON list.
    """
    try:
        with open(filepath, 'r', encoding='utf-8') as f:
            data = json.load(f)
    except (OSError, json.JSONDecodeError):
        # Missing/unreadable file or corrupt JSON: start a fresh history.
        # (OSError also covers FileNotFoundError and permission errors.)
        return []
    # Guard against a valid JSON document that is not a list (e.g. a dict):
    # every caller appends to / slices the result, so a non-list would crash
    # later in a much less obvious place.
    return data if isinstance(data, list) else []

def save_history(filepath: str, history: List[dict]):
    """Persist the conversation history as pretty-printed UTF-8 JSON.

    Failures are reported to the console but never raised, so a bad disk or
    path cannot crash the main loop.
    """
    try:
        serialized = json.dumps(history, indent=4, ensure_ascii=False)
        with open(filepath, 'w', encoding='utf-8') as fh:
            fh.write(serialized)
    except Exception as e:
        print(f"错误: 无法保存历史文件 {filepath}: {e}")

def cleanup_old_history(history: List[dict], max_entries: int = 50) -> List[dict]:
    """Keep only the newest `max_entries` records of the history.

    The original list is returned untouched when it already fits; otherwise
    a trimmed copy is returned and a console note reports the reduction.
    """
    if len(history) <= max_entries:
        return history
    kept = history[-max_entries:]
    print(f"✓ 历史记录已清理: {len(history)} → {len(kept)}")
    return kept

def convert_to_messages(history: List[dict], context_config: dict, system_prompt: str = None) -> List[BaseMessage]:
    """Build the LLM message list from the persisted history.

    A SystemMessage is prepended when `system_prompt` is given.  When context
    is enabled, the last `max_turns * 2` human/AI entries are replayed as
    HumanMessage / AIMessage objects (tool-call metadata preserved).
    """
    messages: List[BaseMessage] = []

    if system_prompt:
        from langchain_core.messages import SystemMessage
        messages.append(SystemMessage(content=system_prompt))

    # Context disabled: send only the (optional) system prompt.
    if not context_config.get('enable_context', True):
        return messages

    # Replay the most recent dialogue turns (2 entries per turn).
    max_turns = context_config.get('max_turns', 10)
    dialogue = [entry for entry in history if entry.get('type') in ('human', 'AIMessage')]

    for entry in dialogue[-max_turns * 2:]:
        content = entry.get('content', '')
        if entry.get('type') == 'human':
            messages.append(HumanMessage(content=content))
        else:  # 'AIMessage'
            messages.append(AIMessage(content=content, tool_calls=entry.get('tool_calls', [])))

    return messages

# -----------------------------------
# LangGraph state and nodes
# -----------------------------------
class AgentState(TypedDict):
    """Shared state threaded through the LangGraph agent workflow."""
    # `operator.add` tells LangGraph to concatenate message lists returned
    # by nodes onto this field rather than replacing it.
    messages: Annotated[List[BaseMessage], operator.add]
    # Wall-clock timestamp (time.time()) of the user's input; forwarded to
    # StreamingTTS.set_user_input_time by agent_node.
    t0_time: Optional[float]


async def agent_node(state: AgentState, llm, streaming_tts: StreamingTTS, input_handler: NonBlockingInput = None):
    """Core agent node: stream the LLM reply, echo it, and feed it to TTS.

    Tokens are printed to the console as they arrive and, when TTS is
    available, forwarded to the streaming TTS pipeline in real time.  Any
    fresh user input (or a pre-set interrupt flag) observed mid-stream
    aborts the turn.

    Args:
        state: Current graph state; reads "messages" and "t0_time".
        llm: Chat model (already tool-bound upstream) supporting astream().
        streaming_tts: TTS pipeline; gated on `is_tts_available`.
        input_handler: Optional non-blocking stdin reader used to detect
            user interrupts while streaming.

    Returns:
        {"messages": [<final AIMessage>]} on success, or {"messages": []}
        when the turn was interrupted (routes the graph straight to END).
    """
    messages = state["messages"]
    # Fall back to "now" if the caller did not stamp the user's input time.
    t0_time = state.get("t0_time", time.time())
    print("\nAI 思考中...")

    # Give TTS the input timestamp (presumably for end-to-end latency
    # accounting inside StreamingTTS — TODO confirm).
    if streaming_tts.is_tts_available:
        streaming_tts.set_user_input_time(t0_time)

    # NOTE(review): per-turn RAG retrieval/context injection was deliberately
    # disabled to avoid injecting memory context on every turn; the original
    # implementation is kept below for reference.
    # if messages and isinstance(messages[-1], HumanMessage):
    #     user_input = messages[-1].content
    #     rag_manager = await get_enhanced_rag_manager()
    #     if rag_manager and await rag_manager.rag_retriever.should_use_memory(user_input):
    #         rag_context = await rag_manager.retrieve_context(user_input)
    #         if rag_context and rag_context.get('context'):
    #             print(f"💡 已检索相关记忆")
    #             # 不直接替换消息,而是添加系统提示
    #             context_note = f"\n[参考历史记忆: {rag_context['context'][:200]}...]"
    #             messages[-1] = HumanMessage(content=user_input + context_note)

    stream_generator = llm.astream(messages)
    full_response_content = ""
    full_response_message = None
    interrupted = False

    print("AI: ", end="", flush=True)

    # Async generator that yields text chunks for the TTS consumer while
    # also printing them and accumulating the complete response message.
    async def realtime_text_stream():
        nonlocal full_response_content, full_response_message, interrupted

        try:
            async for chunk in stream_generator:
                # Any new user input interrupts the current turn.
                if input_handler:
                    # Check the interrupt flag first...
                    if input_handler.is_interrupted():
                        print(f"\n[系统] 检测到中断标志，正在中断上一轮对话...")
                        interrupted = True
                        break

                    # ...then poll for fresh input with a very short timeout
                    # so streaming latency stays low.
                    user_input = input_handler.get_input(timeout=0.005)
                    if user_input is not None:
                        print(f"\n[系统] 检测到新输入，正在中断上一轮对话...")
                        interrupted = True
                        # Flag the interrupt for downstream nodes...
                        input_handler.set_interrupt()
                        # ...and drop queued lines to avoid reprocessing.
                        input_handler.clear_queue()
                        break

                if chunk.content:
                    # Echo the token stream to the console in real time.
                    print(chunk.content, end="", flush=True)
                    full_response_content += chunk.content
                    # Hand the same text to the TTS consumer.
                    yield chunk.content

                # Accumulate chunks into one message object via chunk
                # addition (message-chunk "+" — TODO confirm it merges
                # tool-call deltas as expected for this model).
                if full_response_message is None:
                    full_response_message = chunk
                else:
                    full_response_message += chunk
        except Exception as e:
            # Treat a mid-stream failure like an interrupt so the turn ends
            # without attempting tool calls.
            if not interrupted:
                print(f"\n[系统] 流式生成异常: {e}")
                interrupted = True

    # Drive the stream: through the TTS pipeline when available, otherwise
    # just drain it so console output still happens.
    if streaming_tts.is_tts_available:
        await streaming_tts.process_realtime_stream(realtime_text_stream())
    else:
        # TTS unavailable: consume the LLM stream for its print side effects.
        async for chunk in realtime_text_stream():
            pass

    print()  # newline after the streamed reply

    if interrupted:
        print("对话已被用户中断，跳过工具调用。")
        # An empty delta makes should_continue() route to END.
        return {"messages": []}

    # Surface any tool calls the model requested, for operator visibility.
    if full_response_message and full_response_message.tool_calls:
        print(f"\n{'='*60}")
        print(f"🔧 LLM 决定调用 {len(full_response_message.tool_calls)} 个工具")
        print(f"{'='*60}")
        for i, call in enumerate(full_response_message.tool_calls, 1):
            print(f"[{i}] {call.get('name')} | 参数: {call.get('args')}")
        print(f"{'='*60}\n")

    return {"messages": [full_response_message]}

# -----------------------------------
# Initialization helpers
# -----------------------------------
def load_config(config_path: str):
    """Load the LLM configuration.

    Prefers the project's unified config manager (`configs.get_llm_config`);
    falls back to reading the YAML file directly when that module is
    unavailable.

    Args:
        config_path: Path to the YAML config file used by the fallback.

    Returns:
        The parsed configuration, or None when it cannot be loaded.
    """
    try:
        # Preferred path: the unified configuration manager.
        from configs import get_llm_config
        return get_llm_config()
    except ImportError:
        pass
    # Fallback: read the YAML file directly.
    try:
        with open(config_path, 'r', encoding='utf-8') as f:
            return yaml.safe_load(f)
    except FileNotFoundError:
        print(f"错误: 找不到配置文件 {config_path}")
        return None
    except yaml.YAMLError as e:
        # Previously a malformed config crashed startup with a raw traceback;
        # report it and let the caller (main) abort gracefully instead.
        print(f"错误: 配置文件解析失败 {config_path}: {e}")
        return None


def initialize_tts(config: dict) -> StreamingTTS:
    """Build and warm up the streaming TTS pipeline from the 'tts_config' section.

    Always returns a StreamingTTS instance; when the service is unreachable
    the instance is returned early and callers are expected to gate on its
    `is_tts_available` flag.

    Args:
        config: Full application config; only 'tts_config' is consumed here.

    Returns:
        A StreamingTTS instance (possibly with TTS effectively disabled).
    """
    tts_config = config.get('tts_config', {})

    # Assemble the runtime config consumed by StreamingTTS: absolute service
    # URLs plus the synthesis parameter template with streaming forced on.
    tts_runtime_config = {
        "API_HOST": tts_config.get("API_HOST"),
        "TTS_URL": f"{tts_config.get('API_HOST')}{tts_config.get('TTS_URL', '/tts')}",
        "CONTROL_URL": f"{tts_config.get('API_HOST')}/control",
        "SAMPLE_RATE": tts_config.get("SAMPLE_RATE"),
        "CHUNK_SIZE": tts_config.get("CHUNK_SIZE"),
        "TTS_PARAMS": {
            **tts_config.get('tts_params_template', {}),
            "ref_audio_path": tts_config.get("REF_AUDIO_PATH"),
            "prompt_text": tts_config.get("PROMPT_TEXT"),
            "streaming_mode": True
        }
    }

    print("\n--- 启动流式TTS初始化阶段 ---")

    # Create the TTS instance.
    streaming_tts = StreamingTTS(tts_runtime_config)

    # Bail out early if the TTS service is unreachable; the instance is
    # still returned so callers can check `is_tts_available`.
    if not streaming_tts.check_tts_availability():
        print("❌ TTS服务连接失败，将禁用TTS功能")
        return streaming_tts

    # Optional model switch (only when both model paths are configured).
    gpt_path = tts_config.get("GPT_MODEL_PATH")
    sovits_path = tts_config.get("SOVITS_MODEL_PATH")
    if gpt_path and sovits_path:
        if not switch_models(tts_runtime_config["API_HOST"], gpt_path, sovits_path):
            print("警告: TTS 模型切换失败，继续使用默认模型。")
            # A failed switch may have left the service down — re-probe and
            # return early if it no longer responds.
            if not streaming_tts.check_tts_availability():
                print("❌ TTS服务连接失败，将禁用TTS功能")
                return streaming_tts

    # Warm up the model with a short utterance.
    # NOTE(review): on warm-up failure, initialization still completes unless
    # the follow-up availability probe also fails — confirm this best-effort
    # policy is intended.
    if not preload_tts(tts_runtime_config["TTS_URL"], tts_runtime_config["TTS_PARAMS"], "你好"):
        print("❌ TTS模型预热失败，将禁用TTS功能")
        if not streaming_tts.check_tts_availability():
            print("❌ TTS服务连接失败，将禁用TTS功能")
            return streaming_tts

    print("--- 流式TTS初始化阶段完成 ---\n")

    return streaming_tts

# -----------------------------------
# LangGraph construction
# -----------------------------------
def should_continue(state: AgentState):
    """Routing predicate for the agent node's conditional edge.

    Returns "tools" when the agent's final message requests tool calls,
    otherwise "end" — including the interrupted case, where the agent node
    returned an empty message delta.
    """
    msgs = state["messages"]
    # Empty message list means the turn was interrupted upstream.
    if not msgs:
        return "end"
    last = msgs[-1]
    if isinstance(last, AIMessage) and last.tool_calls:
        return "tools"
    return "end"

async def tool_node_with_interrupt_check(state: AgentState, tools: list, input_handler: NonBlockingInput):
    """Execute the requested tool calls unless the user has interrupted.

    Returns a {"messages": [...]} state delta; the list is empty on
    interrupt or tool failure so the graph can proceed without tool output.
    """
    # Skip tool execution entirely when an interrupt was flagged mid-turn.
    if input_handler and input_handler.is_interrupted():
        print("检测到中断，跳过工具调用。")
        return {"messages": []}

    try:
        result = await ToolNode(tools).ainvoke(state)
    except Exception as e:
        # Log the full traceback to the MCP debug log; keep the console terse.
        error_msg = f"工具调用失败: {str(e)}"
        mcp_debug_logger.error(error_msg)
        mcp_debug_logger.error(traceback.format_exc())
        print(f"❌ {error_msg}")
        return {"messages": []}

    mcp_debug_logger.debug(f"工具调用完成")
    return {"messages": result.get("messages", [])}

async def build_agent_graph(llm, tools: list, streaming_tts: StreamingTTS, input_handler: NonBlockingInput):
    """Assemble and compile the LangGraph workflow (agent node + optional tools node)."""
    # Bind the tool schemas to the LLM so it can emit tool calls.
    bound_llm = llm
    if tools:
        print(f"\n🔧 已绑定 {len(tools)} 个工具到LLM")
        for idx, t in enumerate(tools, 1):
            summary = t.description if len(t.description) <= 80 else t.description[:80] + "..."
            print(f"  [{idx}] {t.name}: {summary}")
        print()
        bound_llm = llm.bind_tools(tools)
    else:
        print("⚠️ 没有工具可绑定")

    # partial() bakes the non-state arguments into the node callables.
    graph = StateGraph(AgentState)
    graph.add_node(
        "agent",
        partial(agent_node, llm=bound_llm, streaming_tts=streaming_tts, input_handler=input_handler),
    )

    if tools:
        # Interrupt-aware tool execution node, looping back to the agent.
        graph.add_node(
            "tools",
            partial(tool_node_with_interrupt_check, tools=tools, input_handler=input_handler),
        )
        graph.add_conditional_edges("agent", should_continue, {"tools": "tools", "end": END})
        graph.add_edge('tools', 'agent')
    else:
        # No tools: every agent turn terminates the graph.
        graph.add_conditional_edges("agent", lambda state: "end", {"end": END})

    graph.set_entry_point("agent")
    return graph.compile()

# -----------------------------------
# Main entry point
# -----------------------------------
async def main():
    """Program entry: initialize every subsystem, then run the interactive loop.

    Startup order: MCP HTTP server → LLM → TTS → MCP tools → Graph-RAG
    memory → non-blocking stdin reader → compiled LangGraph app.  The main
    loop polls stdin, runs each turn through the graph, persists history,
    and honors user interrupts.  On exit all resources are torn down
    best-effort, each step bounded or wrapped so one failure cannot block
    the rest.
    """
    config = load_config(CONFIG_FILE)
    if not config: return

    try:
        # Start the MCP HTTP server in a background thread.
        print("正在启动MCP HTTP服务器...")
        from server import start_server_in_background
        # Reference kept; presumably a daemon thread — TODO confirm in server.py.
        server_thread = start_server_in_background()
        print("✅ MCP HTTP服务器已启动 (端口: 7888)")

        llm = initialize_llm(config)
        streaming_tts = initialize_tts(config)
        tools = await initialize_mcp_tools(BASE_DIR)

        # Initialize the enhanced Graph-RAG memory system; failure here is
        # non-fatal — the agent keeps running without memory.
        print("正在初始化增强Graph RAG记忆系统...")
        try:
            rag_manager = await initialize_enhanced_graph_rag(MEMORY_DIR)
            if not rag_manager or not rag_manager.is_initialized:
                print("⚠️  RAG系统初始化失败，将禁用RAG功能，但程序可以继续运行")
        except Exception as e:
            print(f"⚠️  RAG系统初始化出现异常: {e}")
            import traceback
            traceback.print_exc()
            print("程序将继续运行，但RAG功能将被禁用")
            rag_manager = None

        # Non-blocking stdin reader (background thread + queue).
        print("初始化输入处理器...")
        input_handler = NonBlockingInput()
        print("✅ 输入处理器初始化成功")

        # Compile the LangGraph workflow.
        print("构建Agent图...")
        app = await build_agent_graph(llm, tools, streaming_tts, input_handler)
        print("✅ Agent图构建成功")
    except Exception as e:
        print(f"初始化失败: {e}")
        return

    print("\n--- 代理运行中 (输入 'q' 或 '退出' 结束) ---")
    global_history = load_history(CONTEXT_FILE)

    # Trim old entries to keep the history file small.
    global_history = cleanup_old_history(global_history, max_entries=100)

    while True:
        try:
            # Exit if the reader thread shut down (EOF / Ctrl-C in reader).
            if input_handler.stop_event.is_set():
                print("\n输入处理器已停止，正在退出...")
                break

            # Poll for input with a short timeout so interrupts are noticed
            # quickly even while the user types nothing.
            user_input = input_handler.get_input(timeout=0.05)
            if user_input is None:
                # No new input — still honor a pending interrupt flag.
                if input_handler.is_interrupted():
                    print("检测到中断标志，正在处理...")
                    # Stop any in-flight TTS playback.
                    if streaming_tts.is_tts_available:
                        streaming_tts.interrupt()
                    # Interrupt handled; clear the flag.
                    input_handler.clear_interrupt()
                continue  # nothing to process this tick

            # Any fresh input preempts whatever the agent was doing.
            print(f"收到新输入: {user_input}")

            # Reject empty / whitespace-only input.
            if not user_input or not user_input.strip():
                print("请输入有效内容（不能为空或只有空格）")
                continue

            if user_input.lower() in ["q", "退出", "exit", "quit"]:
                break

            # Memory-management slash commands.
            if user_input.lower() == "/memory_stats":
                rag_manager = await get_enhanced_rag_manager()
                if rag_manager:
                    stats = await rag_manager.get_memory_stats()
                    print(f"记忆统计: {stats}")
                continue
            elif user_input.lower().startswith("/clear"):
                rag_manager = await get_enhanced_rag_manager()
                if rag_manager:
                    # /clear short|long|temporal clears one tier; bare /clear
                    # clears everything.
                    if "short" in user_input:
                        await rag_manager.clear_memory(MemoryType.SHORT_TERM)
                    elif "long" in user_input:
                        await rag_manager.clear_memory(MemoryType.LONG_TERM)
                    elif "temporal" in user_input:
                        await rag_manager.clear_memory(MemoryType.TEMPORAL)
                    else:
                        await rag_manager.clear_memory()
                    print("✓ 记忆已清空")
                continue

            t0_time = time.time()
            # New turn: clear any stale interrupt flag...
            input_handler.clear_interrupt()
            # ...abort whatever the previous turn was still doing...
            print("正在中断当前处理，开始新的对话...")
            # ...stop in-flight TTS playback...
            if streaming_tts.is_tts_available:
                streaming_tts.interrupt()
            # ...and drop queued lines so they are not replayed.
            input_handler.clear_queue()

            # Record the user message in the persisted history.
            current_user_message = HumanMessage(content=user_input)
            user_msg_dict = current_user_message.model_dump()
            user_msg_dict['type'] = 'human'
            user_msg_dict['timestamp'] = str(datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
            global_history.append(user_msg_dict)

            # Build the message window (system prompt + recent turns) and run
            # the graph for this turn.
            context_config = config.get('model_config', {}).get('context_config', {})
            system_prompt = config.get('model_config', {}).get('system_prompt')
            context_messages = convert_to_messages(global_history, context_config, system_prompt)
            initial_state = AgentState(messages=context_messages, t0_time=t0_time)

            final_state = await app.ainvoke(initial_state)

            # Persist the agent's final message (empty when interrupted).
            if final_state["messages"]:
                final_message = final_state["messages"][-1]

                if isinstance(final_message, (AIMessage, ToolMessage)):
                    msg_dict = final_message.model_dump()
                    msg_dict['type'] = 'AIMessage' if isinstance(final_message, AIMessage) else 'ToolMessage'
                    msg_dict['timestamp'] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                    global_history.append(msg_dict)

                    # Store only substantive AI replies (>10 chars) in RAG memory.
                    if isinstance(final_message, AIMessage) and final_message.content:
                        if rag_manager and len(final_message.content) > 10:
                            await rag_manager.process_conversation(user_input, final_message.content)

            # Run the synchronous file write in a worker thread so it does
            # not block the event loop.
            await asyncio.to_thread(save_history, CONTEXT_FILE, global_history)

        except KeyboardInterrupt:
            # Ctrl-C: stop TTS and the reader thread, then leave the loop.
            if streaming_tts.is_tts_available:
                streaming_tts.interrupt()
            print("\n\n检测到中断信号，正在退出...")
            input_handler.stop()
            break
        except Exception as e:
            print(f"\n❌ 发生错误: {e}")
            mcp_debug_logger.error(f"主循环错误: {e}\n{traceback.format_exc()}")
            # Keep running: one failed turn must not kill the agent.
            continue

    # Shutdown: release resources best-effort, each step isolated.
    print("\n正在清理资源...")

    # 1. Stop the stdin reader thread.
    input_handler.stop()

    # 2. Flush and tear down TTS (bounded by a timeout).
    if streaming_tts.is_tts_available:
        try:
            streaming_tts.interrupt()
            await asyncio.wait_for(streaming_tts.wait_for_completion(), timeout=3.0)
            streaming_tts.cleanup()
        except asyncio.TimeoutError:
            print("TTS清理超时")
        except Exception as e:
            print(f"TTS清理错误: {e}")

    # 3. Close MCP tool connections (bounded by a timeout).
    try:
        await asyncio.wait_for(close_mcp_tools(), timeout=5.0)
    except asyncio.TimeoutError:
        print("MCP关闭超时")
    except Exception as e:
        print(f"MCP关闭错误: {e}")

    # 4. Shut down the Graph-RAG memory system.
    try:
        await close_enhanced_graph_rag()
    except Exception as e:
        print(f"RAG关闭错误: {e}")

    # 5. Final history flush.
    try:
        await asyncio.to_thread(save_history, CONTEXT_FILE, global_history)
    except Exception as e:
        print(f"保存历史错误: {e}")

    print("✓ 代理已退出")

if __name__ == "__main__":
    # Run the async entry point; a top-level Ctrl-C exits with a message
    # instead of a traceback.
    try:
        asyncio.run(main())
    except KeyboardInterrupt:
        print("\n程序被中断，退出。")