"""
MCP服务管理器，用于管理多个MCP服务连接
"""
from typing import Dict, List, Optional, Any, Tuple
import asyncio
import time
import json
import datetime
from config.settings import MCP_CONFIG, MODEL, OPENAI_API_KEY, BASE_URL
from utils.mcp_worker import MCPWorker
from utils.logger import setup_logger
from utils.conversation_manager import ConversationManager
from models.schemas import Message
from openai import AsyncOpenAI
from utils.prompts import get_server_selection_prompt, get_merge_responses_prompt, get_merge_user_prompt

class MCPManager:
    """MCP服务管理器，用于管理多个MCP服务连接和对话"""
    
    def __init__(self):
        """Initialize the MCP service manager.

        Wires up the client registry, logger, conversation store and the
        async OpenAI client from module-level settings, then loads the
        active server definitions from MCP_CONFIG. No network connection
        is opened here; call connect_all() for that.
        """
        # server_id -> connected MCPWorker; populated by connect_all()
        self.clients: Dict[str, MCPWorker] = {}
        # server_id -> raw config dict for every server marked isActive
        self.available_servers: Dict[str, dict] = {}
        # setup_logger attaches automatic file logging as well
        self.logger = setup_logger(name="mcp_manager", level="debug")
        self.conversation_manager = ConversationManager()
        self.openai_client = AsyncOpenAI(
            base_url=BASE_URL,
            api_key=OPENAI_API_KEY,
        )
        self.model = MODEL
        self._load_servers()
        
    def _load_servers(self):
        """从配置中加载服务器信息"""
        self.logger.info("加载MCP服务器配置")
        if "mcpServers" in MCP_CONFIG:
            self.available_servers = {
                server_id: server_config
                for server_id, server_config in MCP_CONFIG.get("mcpServers", {}).items()
                if server_config.get("isActive", False)
            }
            self.logger.info(f"找到 {len(self.available_servers)} 个活跃的MCP服务器")
            for server_id in self.available_servers:
                self.logger.debug(f"服务器ID: {server_id}")
        else:
            self.logger.warning("未找到MCP服务器配置")
    
    async def connect_all(self):
        """连接到所有活跃的MCP服务器"""
        self.logger.info("开始连接所有活跃的MCP服务器")
        connect_tasks = []
        for server_id, server_config in self.available_servers.items():
            if server_config.get("isActive", False):
                self.logger.debug(f"准备连接服务器: {server_id}")
                connect_tasks.append(self._connect_server(server_id, server_config))
        
        if connect_tasks:
            self.logger.debug(f"等待 {len(connect_tasks)} 个连接任务完成")
            await asyncio.gather(*connect_tasks)
            self.logger.info(f"已连接到 {len(self.clients)} 个MCP服务器")
        else:
            self.logger.warning("没有活跃的MCP服务器需要连接")
        
        return self.clients
    
    async def _connect_server(self, server_id: str, server_config: dict):
        """连接单个MCP服务器"""
        self.logger.info(f"连接服务器: {server_id}")
        client = MCPWorker()
        base_url = server_config.get("baseUrl")
        if not base_url:
            self.logger.error(f"服务器 {server_id} 缺少baseUrl配置")
            return
        
        try:
            await client.connect_to_sse_server(base_url)
            self.clients[server_id] = client
            self.logger.info(f"成功连接到服务器: {server_id}")
        except Exception as e:
            self.logger.error(f"连接服务器 {server_id} 失败: {str(e)}", exc_info=True)
    
    async def cleanup(self):
        """清理所有客户端连接"""
        self.logger.info("开始清理所有MCP服务器连接")
        cleanup_tasks = []
        for server_id, client in self.clients.items():
            self.logger.debug(f"准备清理服务器连接: {server_id}")
            cleanup_tasks.append(client.cleanup())
        
        if cleanup_tasks:
            self.logger.debug(f"等待 {len(cleanup_tasks)} 个清理任务完成")
            await asyncio.gather(*cleanup_tasks)
            self.logger.info("所有MCP服务器连接已清理")
        else:
            self.logger.debug("没有活跃的MCP服务器连接需要清理")
        
        self.clients.clear()
    
    def get_servers_info(self):
        """获取所有服务器信息"""
        self.logger.debug("获取服务器信息")
        server_info = [
            {
                "id": server_id,
                "description": config.get("description", ""),
                "baseUrl": config.get("baseUrl", ""),
                "isConnected": server_id in self.clients
            }
            for server_id, config in self.available_servers.items()
        ]
        self.logger.debug(f"返回 {len(server_info)} 个服务器信息")
        return server_info
    
    async def process_query(self, query: str, conversation_id: Optional[str] = None) -> Dict[str, Any]:
        """
        Process a query: pick suitable server(s) and maintain conversation history.

        Args:
            query: The user's query text.
            conversation_id: Existing conversation ID; a new conversation is
                created when this is None or does not resolve to any history.

        Returns:
            The result dict. It always carries "conversation_id", plus either
            the server response ("messages", "tool_calls", "server_id", ...)
            or an "error" entry when processing failed.
        """
        self.logger.info(f"处理查询，对话ID: {conversation_id or '新对话'}")
        
        # Bail out early when no MCP server connection is available.
        if not self.clients:
            self.logger.error("没有连接到任何MCP服务器")
            return {
                "error": "没有连接到任何MCP服务器",
                "conversation_id": None
            }
        
        # Resolve or create the conversation; an unknown ID silently starts
        # a fresh conversation instead of failing.
        if conversation_id is None:
            conversation_id = self.conversation_manager.create_conversation()
            self.logger.info(f"创建新对话: {conversation_id}")
            history = []
        else:
            history = self.conversation_manager.get_messages(conversation_id)
            if not history:  # conversation does not exist (or has no messages)
                conversation_id = self.conversation_manager.create_conversation()
                self.logger.info(f"无效对话ID，创建新对话: {conversation_id}")
                history = []
        
        # Choose the server(s) best suited to this particular query.
        servers_to_use = await self._select_servers(query, history)
        server_ids = [s[0] for s in servers_to_use]
        self.logger.info(f"为当前查询选择服务器: {', '.join(server_ids)}")
        
        # Record the primary (first-selected) server on the conversation.
        if servers_to_use:
            primary_server_id = servers_to_use[0][0]
            self.conversation_manager.set_server_id(conversation_id, primary_server_id)
        
        # Dispatch the query.
        try:
            # Single server: call it directly and tag the result with its id.
            if len(servers_to_use) == 1:
                server_id, client = servers_to_use[0]
                result = await client.process_query(query, history)
                result["server_id"] = server_id
            else:
                # Several servers: fan out in parallel and merge the replies.
                result = await self._process_with_multiple_servers(servers_to_use, query, history)
            
            # Persist the exchange AFTER processing, so the `history` passed
            # to the servers did not yet contain the current user message.
            self.conversation_manager.add_message(
                conversation_id,
                Message(role="user", content=query)
            )
            
            # Append every message returned by the server (typically the
            # assistant replies) to the stored conversation.
            if "messages" in result:
                for msg in result["messages"]:
                    self.conversation_manager.add_message(
                        conversation_id,
                        Message(**msg)
                    )
            
            # Expose the conversation ID to the caller.
            result["conversation_id"] = conversation_id
            return result
            
        except Exception as e:
            self.logger.error(f"处理查询时出错: {str(e)}", exc_info=True)
            return {
                "error": f"处理查询时出错: {str(e)}",
                "conversation_id": conversation_id
            }
    
    async def _process_with_multiple_servers(
        self, servers: List[Tuple[str, MCPWorker]], query: str, history: List[Message]
    ) -> Dict[str, Any]:
        """
        使用多个服务器处理查询
        
        参数:
            servers: 服务器ID和客户端的元组列表
            query: 用户查询
            history: 对话历史
            
        返回:
            合并后的处理结果
        """
        self.logger.info(f"使用 {len(servers)} 个服务器并行处理查询")
        
        # 创建并行任务
        tasks = []
        for server_id, client in servers:
            self.logger.debug(f"创建服务器 {server_id} 的处理任务")
            tasks.append(self._process_on_server(server_id, client, query, history))
        
        # 等待所有任务完成
        results = await asyncio.gather(*tasks)
        
        # 合并结果
        combined_result = await self._combine_results(results, query)
        return combined_result
    
    async def _process_on_server(
        self, server_id: str, client: MCPWorker, query: str, history: List[Message]
    ) -> Dict[str, Any]:
        """
        在单个服务器上处理查询
        
        参数:
            server_id: 服务器ID
            client: 服务器客户端
            query: 用户查询
            history: 对话历史
            
        返回:
            处理结果和服务器ID
        """
        self.logger.info(f"在服务器 {server_id} 上处理查询")
        try:
            result = await client.process_query(query, history)
            result["server_id"] = server_id
            return result
        except Exception as e:
            self.logger.error(f"服务器 {server_id} 处理查询失败: {str(e)}", exc_info=True)
            return {
                "error": f"服务器 {server_id} 处理失败: {str(e)}",
                "server_id": server_id,
                "messages": [{"role": "assistant", "content": f"服务器 {server_id} 处理失败: {str(e)}"}],
                "tool_calls": []
            }
    
    async def _combine_results(self, results: List[Dict[str, Any]], original_query: str) -> Dict[str, Any]:
        """
        合并多个服务器的处理结果
        
        参数:
            results: 各服务器的处理结果列表
            original_query: 原始查询
            
        返回:
            合并后的结果
        """
        self.logger.info("合并多个服务器的处理结果")
        
        # 收集所有工具调用
        all_tool_calls = []
        server_messages = {}
        
        for result in results:
            server_id = result.get("server_id", "unknown")
            
            # 收集工具调用
            if "tool_calls" in result:
                for tool_call in result["tool_calls"]:
                    tool_call["server_id"] = server_id  # 添加服务器ID
                    all_tool_calls.append(tool_call)
            
            # 收集消息
            if "messages" in result:
                server_messages[server_id] = result["messages"]
        
        # 使用LLM合并响应
        combined_content = await self._merge_responses_with_llm(server_messages, original_query)
        
        return {
            "messages": [{"role": "assistant", "content": combined_content}],
            "tool_calls": all_tool_calls,
            "servers_used": [result.get("server_id") for result in results if "server_id" in result]
        }
    
    async def _merge_responses_with_llm(
        self, server_messages: Dict[str, List[Dict[str, Any]]], original_query: str
    ) -> str:
        """
        使用大模型合并多个服务器的响应
        
        参数:
            server_messages: 各服务器的消息列表字典
            original_query: 原始查询
            
        返回:
            合并后的回复内容
        """
        self.logger.info("使用大模型合并多个服务器的响应")
        
        try:
            # 获取合并提示词
            prompt = get_merge_responses_prompt(original_query, server_messages)
            user_prompt = get_merge_user_prompt()
            
            # 使用大模型合并
            response = await self.openai_client.chat.completions.create(
                model=self.model,
                messages=[
                    {"role": "system", "content": prompt},
                    {"role": "user", "content": user_prompt}
                ],
            )
            
            merged_content = response.choices[0].message.content
            self.logger.debug(f"合并后的内容: {merged_content}")
            return merged_content
        except Exception as e:
            self.logger.error(f"使用大模型合并回复时出错: {str(e)}", exc_info=True)
            
            # 回退方案：简单连接所有回复
            fallback_content = "综合回复:\n\n"
            for server_id, messages in server_messages.items():
                server_content = ""
                for msg in messages:
                    if msg.get("role") == "assistant" and msg.get("content"):
                        server_content += msg.get("content", "") + " "
                
                if server_content:
                    fallback_content += f"• {server_content.strip()}\n\n"
            
            return fallback_content

    async def _select_servers(self, query: str, history: List[Message]) -> List[Tuple[str, MCPWorker]]:
        """
        智能选择一个或多个适合处理查询的服务器
        
        参数:
            query: 用户查询
            history: 对话历史
            
        返回:
            服务器ID和对应客户端的元组列表
        """
        # 如果只有一个服务器，直接使用
        if len(self.clients) == 1:
            server_id = next(iter(self.clients))
            self.logger.info(f"只有一个服务器可用，使用: {server_id}")
            return [(server_id, self.clients[server_id])]
        
        self.logger.debug(f"使用大模型智能选择服务器处理查询: {query}")
        
        # 构建服务器选择提示词
        prompt = self._build_multi_server_selection_prompt(query)
        self.logger.debug(f"服务器选择提示词: {prompt}")
        
        try:
            # 使用大模型判断应该使用哪些服务器
            response = await self.openai_client.chat.completions.create(
                model=self.model,
                messages=[
                    {"role": "system", "content": prompt},
                    {"role": "user", "content": query}
                ],
            )
            
            # 从大模型回复中提取服务器ID
            content = response.choices[0].message.content
            self.logger.debug(f"大模型判断结果: {content}")
            
            # 解析大模型回复，获取多个服务器ID
            server_ids = self._parse_multi_server_selection_response(content)
            
            if server_ids:
                # 检查服务器ID是否有效并返回对应的客户端
                servers = []
                for server_id in server_ids:
                    if server_id in self.clients:
                        servers.append((server_id, self.clients[server_id]))
                        self.logger.info(f"选择服务器: {server_id}")
                
                if servers:
                    return servers
            
            # 如果解析失败，使用默认服务器
            self.logger.warning(f"无法从大模型回复中解析有效的服务器ID: {content}")
        except Exception as e:
            self.logger.error(f"大模型选择服务器时出错: {str(e)}", exc_info=True)
        
        # 如果大模型判断失败，使用第一个可用的服务器作为默认选择
        default_server_id = next(iter(self.clients))
        self.logger.info(f"使用默认服务器: {default_server_id}")
        return [(default_server_id, self.clients[default_server_id])]
    
    def _build_multi_server_selection_prompt(self, query: str) -> str:
        """
        构建支持多服务器选择的提示词
        
        参数:
            query: 用户查询
            
        返回:
            提示词
        """
        # 构建可用服务器信息
        servers_info = []
        for server_id, config in self.available_servers.items():
            if server_id in self.clients:
                servers_info.append({
                    "id": server_id,
                    "description": config.get("description", ""),
                })
        
        return get_server_selection_prompt(servers_info)
    
    def _parse_multi_server_selection_response(self, response: str) -> List[str]:
        """
        从大模型回复中解析多个服务器ID
        
        参数:
            response: 大模型回复
            
        返回:
            服务器ID列表
        """
        response = response.strip().lower()
        server_ids = []
        
        # 尝试直接匹配"服务器ID: xxx,yyy,zzz"格式
        chinese_patterns = ["服务器id", "服务器 id", "服务id", "服务 id"]
        english_patterns = ["server id", "serverid", "server", "id"]
        
        all_patterns = chinese_patterns + english_patterns
        for pattern in all_patterns:
            if pattern in response:
                # 查找冒号或其他分隔符
                for separator in [":", "：", "-", "是", "为"]:
                    if separator in response:
                        parts = response.split(separator, 1)
                        if len(parts) > 1:
                            id_part = parts[1].strip()
                            # 分割多个ID (处理逗号、空格、分号等分隔符)
                            for split_char in [",", "，", ";", "；", " ", "|"]:
                                if split_char in id_part:
                                    potential_ids = [id.strip() for id in id_part.split(split_char)]
                                    for potential_id in potential_ids:
                                        # 移除可能的标点符号
                                        clean_id = potential_id.strip(",.;:，。；：")
                                        if clean_id and clean_id in self.clients:
                                            server_ids.append(clean_id)
                                    if server_ids:
                                        return server_ids
                            
                            # 如果没有分隔符，则整个部分可能是一个ID
                            clean_id = id_part.strip(",.;:，。；：")
                            if clean_id and clean_id in self.clients:
                                server_ids.append(clean_id)
                                return server_ids
        
        # 如果上面的方法没有找到有效ID，尝试直接匹配服务器ID
        if not server_ids:
            for server_id in self.clients.keys():
                if server_id.lower() in response:
                    server_ids.append(server_id)
        
        return server_ids

    async def _select_best_server(self, query: str, history: List[Message]) -> Tuple[str, MCPWorker]:
        """
        使用大模型智能选择最适合处理查询的服务器
        
        参数:
            query: la用户查询
            history: 对话历史
            
        返回:
            服务器ID和对应的客户端
        """
        servers = await self._select_servers(query, history)
        if not servers:
            # 如果未能选择服务器，使用默认服务器
            default_server_id = next(iter(self.clients))
            return default_server_id, self.clients[default_server_id]
        return servers[0]  # 返回第一个选择的服务器
    
    def _build_server_selection_prompt(self, query: str) -> str:
        """
        构建单服务器选择提示词（向后兼容）
        
        参数:
            query: 用户查询
            
        返回:
            提示词
        """
        return self._build_multi_server_selection_prompt(query)
    
    def _parse_server_selection_response(self, response: str) -> Optional[str]:
        """
        从大模型回复中解析单个服务器ID（向后兼容）
        
        参数:
            response: 大模型回复
            
        返回:
            服务器ID或None
        """
        server_ids = self._parse_multi_server_selection_response(response)
        if server_ids:
            return server_ids[0]
        return None