import os
import sys

# 添加项目根目录到Python路径，确保能找到app模块
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))

import json
from openai import OpenAI
import asyncio
from fastmcp import Client

# 导入配置文件
from app.config.config import MODEL_CONFIGS, DEFAULT_MODEL_PROVIDER, CURRENT_MODEL_CONFIG, SYSTEM_PROMPT
# 导入MCP配置加载器
from app.core.mcp_config_loader import MCPConfigLoader
from app.utils.logger import Logger

# Module-level logger shared by the classes below.
logger = Logger("ai_core")
# --- 1. Configuration management ---
class ConfigManager:
    """Resolves model connection settings.

    Environment variables take precedence; values from the shared
    CURRENT_MODEL_CONFIG serve as fallbacks.
    """

    def __init__(self):
        """Read base URL, API key and model name (env first, config file second)."""
        env = os.environ
        # Doubao model settings, defaulting to the shared config file.
        self.ark_base_url = env.get("ARK_BASE_URL", CURRENT_MODEL_CONFIG["base_url"])
        self.api_key = env.get("YOUR_API_KEY", CURRENT_MODEL_CONFIG["api_key"])
        self.model_name = env.get("MODEL_NAME", CURRENT_MODEL_CONFIG["default_model"])

# Module-level configuration instance, created at import time.
config = ConfigManager()

# --- 2. FastMCP客户端 ---
class FastMCPHandler:
    """Handler around one or more FastMCP servers.

    Aggregates the tool catalogue across servers and routes each tool call
    to the client that registered that tool.
    """

    # Class-level state shared by ALL instances: the discovered tool
    # catalogue and the tool-name -> client-info routing table.
    tools_info = []
    tool_to_client = {}

    def __init__(self, mcp_clients):
        """
        Initialize the handler.

        :param mcp_clients: list of MCP client-info dicts, each shaped like
                            {config_name, server_name, client}
        """
        self.mcp_clients = mcp_clients

    async def get_tools(self):
        """Discover tools on every server, caching the result after the first call."""
        if self.tools_info:
            # Catalogue already populated by an earlier call (possibly on a
            # different instance, since the state is class-level).
            return self.tools_info

        # Start from a clean slate so discovery never duplicates entries.
        self.tools_info.clear()
        self.tool_to_client.clear()

        for entry in self.mcp_clients:
            cfg = entry['config_name']
            srv = entry['server_name']
            try:
                # The client is an async context manager; connect per server.
                async with entry['client']:
                    discovered = await entry['client'].list_tools()
                    for tool in discovered:
                        if tool.name in self.tool_to_client:
                            # First server to register a name wins.
                            continue
                        record = {
                            "name": tool.name,
                            "description": tool.description,
                            "config_name": cfg,
                            "server_name": srv
                        }
                        self.tools_info.append(record)
                        self.tool_to_client[record["name"]] = entry
            except Exception as e:
                print(f"[Error] 从服务器 '{cfg}:{srv}' 获取工具列表失败: {e}")
                continue

        print(f"[Info] 成功获取到 {len(self.tools_info)} 个FastMCP工具")
        # Print the catalogue once, on first successful discovery.
        if len(self.tools_info) > 0:
            for tool in self.tools_info:
                print(f"  - {tool['name']} ({tool['config_name']}:{tool['server_name']}): {tool['description']}")

        return self.tools_info

    def get_tool_description(self, tool_name):
        """Return the registered description for *tool_name*, or a generic label."""
        _missing = object()
        found = next(
            (entry['description'] for entry in self.tools_info
             if entry['name'] == tool_name),
            _missing
        )
        if found is _missing:
            return f"工具 {tool_name}"
        return found

    async def call_tool(self, tool_name, params):
        """Invoke *tool_name* with *params* on whichever server provides it.

        :return: the tool's structured result, or an error dict with keys
                 "error" and "message" when routing or the call fails.
        """
        route = self.tool_to_client.get(tool_name)
        if not route:
            # Refresh the catalogue once, but only if it was never populated.
            if not self.tools_info:
                await self.get_tools()
                route = self.tool_to_client.get(tool_name)

            if not route:
                print(f"[Error] 找不到工具 {tool_name} 对应的客户端")
                return {"error": "TOOL_NOT_FOUND", "message": f"找不到工具 {tool_name} 对应的客户端"}

        client = route['client']

        try:
            # Connect via the client's async context manager per call.
            async with client:
                outcome = await client.call_tool(tool_name, params)

                # Prefer structured payloads when the result carries them.
                if getattr(outcome, 'structured_content', None):
                    return outcome.structured_content
                if getattr(outcome, 'data', None):
                    return outcome.data
                return {"result": str(outcome)}
        except Exception as e:
            print(f"[Error] 调用工具 '{tool_name}' 失败: {e}")
            return {"error": "TOOL_CALL_ERROR", "message": f"调用工具 '{tool_name}' 失败: {str(e)}"}

# Initialize the FastMCP handler using the repository's MCP config loader.
# NOTE(review): this runs at import time and creates MCP clients as a side
# effect; `configs` is assigned but not referenced elsewhere in this file.
config_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'config')
loader = MCPConfigLoader(config_dir)
configs = loader.load_all_configs()
mcp_clients = loader.initialize_clients()
fastmcp_handler = FastMCPHandler(mcp_clients)

# --- 3. AI Core核心逻辑 ---
class AICore:
    """Core orchestration layer: LLM chat completions plus FastMCP tool calls.

    Public entry points:
      * process_query        -- non-streaming query handling with a tool loop
      * process_query_stream -- streaming variant that reports tool progress
                                over a websocket and yields answer tokens
      * run                  -- one-shot entry point used by main()
    """

    def __init__(self, mcp_configs=None, mcp_clients=None, model_provider=None):
        """Initialize clients, the MCP handler, the prompt and memory.

        :param mcp_configs: mapping of loaded MCP configurations (optional)
        :param mcp_clients: list of MCP client-info dicts (optional)
        :param model_provider: accepted for interface compatibility; not used here
        """
        # Model endpoint configuration (env vars override the config file).
        self.config = ConfigManager()

        # Synchronous OpenAI-compatible client for chat completions.
        self.openai_client = OpenAI(
            base_url=self.config.ark_base_url,
            api_key=self.config.api_key
        )

        # NOTE(review): despite the name, this is a second *synchronous*
        # client (the streaming path iterates it with a plain `for`).
        # The attribute name is kept for backward compatibility.
        self.async_openai_client = OpenAI(
            base_url=self.config.ark_base_url,
            api_key=self.config.api_key
        )

        # MCP wiring.
        self.mcp_configs = mcp_configs if mcp_configs is not None else {}
        self.mcp_clients = mcp_clients if mcp_clients is not None else []
        self.fastmcp_handler = FastMCPHandler(self.mcp_clients)

        # Tool schema list; populated via generate_tools_schema().
        self.tools = []

        # System prompt from the shared configuration module.
        self.system_prompt = SYSTEM_PROMPT

        # Bounded conversation memory (last 20 entries).
        from app.core.memory_manager import MemoryManager
        self.memory_manager = MemoryManager(20)

    @staticmethod
    def _preprocess_tool_params(tool_name, params, fallback_query, weather_default_city=None):
        """Normalize model-generated tool arguments before dispatch.

        Consolidates the parameter-massaging logic that was previously
        duplicated in process_query, process_query_stream and run:

          1. collapse list values to their first element;
          2. add camelCase aliases for snake_case keys (aliases overwrite
             the originals on a name clash);
          3. legacy mappings: query -> search_query, from -> origins,
             to -> destinations (only when the target key is absent);
          4. webSearch* tools default search_query to *fallback_query*;
          5. maps_weather optionally defaults its city.

        :param tool_name: name of the MCP tool about to be invoked (may be None)
        :param params: raw argument dict from the model (left unmodified)
        :param fallback_query: text used as the default search query
        :param weather_default_city: default city for maps_weather, or None
                                     to skip that rule (stream path passes "北京")
        :return: a new, normalized argument dict
        """
        processed = dict(params)

        # 1. Lists -> first element (tools expect scalar arguments).
        for key, value in list(processed.items()):
            if isinstance(value, list) and len(value) > 0:
                processed[key] = value[0]

        # 2. snake_case -> camelCase aliases; converted keys take priority.
        converted = {}
        for key, value in processed.items():
            words = key.split('_')
            converted[words[0] + ''.join(w.capitalize() for w in words[1:])] = value
        processed.update(converted)

        # 3. Backward-compatible parameter-name mappings.
        if "query" in processed and "search_query" not in processed:
            processed["search_query"] = processed.pop("query")
        if "from" in processed and "origins" not in processed:
            processed["origins"] = processed.pop("from")
        if "to" in processed and "destinations" not in processed:
            processed["destinations"] = processed.pop("to")

        # 4. Search tools fall back to the user's own query text.
        if tool_name and tool_name.startswith("webSearch") and "search_query" not in processed:
            processed["search_query"] = fallback_query

        # 5. Optional default city for the weather tool.
        if (weather_default_city is not None
                and tool_name == "maps_weather"
                and "city" not in processed
                and "adcode" not in processed):
            processed["city"] = weather_default_city

        return processed

    async def generate_tools_schema(self):
        """Build the OpenAI function-calling schema for the available tools.

        :return: a list with one generic `call_fastmcp_tool` function spec,
                 or an empty list when no FastMCP tools are available.
        """
        tools = await self.fastmcp_handler.get_tools()

        if not tools:
            print("[Info] 没有可用的FastMCP工具，将不生成工具规范")
            return []

        # One generic dispatcher function; the concrete tool is selected via
        # the `tool_name` argument.
        return [
            {
                "type": "function",
                "function": {
                    "name": "call_fastmcp_tool",
                    "description": "调用FastMCP工具执行特定任务。",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "tool_name": {
                                "type": "string",
                                "description": f"要调用的工具名称，可选工具包括：{', '.join([tool['name'] for tool in tools])}"
                            },
                            "params": {
                                "type": "object",
                                "description": "传递给工具的参数，格式为键值对",
                                "additionalProperties": True
                            }
                        },
                        "required": ["tool_name"]
                    }
                }
            }
        ]

    async def call_mcp_tool(self, tool_name: str, tool_params: dict):
        """Invoke an MCP tool by name; thin wrapper around the handler."""
        return await self.fastmcp_handler.call_tool(tool_name, tool_params)

    async def process_query(self, query, stream=False, enable_tools=True, websocket=None, send_result=True, update_memory=True):
        """Handle one user query, resolving tool calls, and return the answer.

        :param query: user input text
        :param stream: accepted for interface compatibility; not used here
        :param enable_tools: whether the model may call tools
        :param websocket: accepted for interface compatibility; not used here
        :param send_result: accepted for interface compatibility; not used here
        :param update_memory: include history and record this exchange
        :return: the assistant's final text answer ("" when the tool loop
                 exhausts its iteration budget without a final answer)
        """
        print(f"[AI Core] 处理用户查询: {query}")

        # Build the prompt: system message, optional memory, then the query.
        messages = [{"role": "system", "content": self.system_prompt}]
        if update_memory:
            messages.extend(self.memory_manager.get_memory())
        messages.append({"role": "user", "content": query})

        max_iterations = 5  # safety cap on tool-call rounds
        iteration = 0
        result = ""

        while iteration < max_iterations:
            # Let the model decide whether to answer or call tools.
            response = self.openai_client.chat.completions.create(
                model=self.config.model_name,
                messages=messages,
                tools=self.tools if enable_tools and self.tools else None,
                tool_choice="auto" if enable_tools and self.tools else None
            )

            response_message = response.choices[0].message
            messages.append(response_message)

            full_response = response_message.content or ""
            tool_calls = response_message.tool_calls

            if not tool_calls:
                # No tool calls: this is the final answer.
                result = full_response
                break

            tool_messages = messages.copy()

            for tool_call in tool_calls:
                function_name = tool_call.function.name
                function_args = json.loads(tool_call.function.arguments)

                if function_name == "call_fastmcp_tool":
                    tool_name = function_args.get("tool_name")
                    params = function_args.get("params", {})

                    # Shared normalization (see _preprocess_tool_params).
                    processed_params = self._preprocess_tool_params(
                        tool_name, params, query)

                    tool_description = self.fastmcp_handler.get_tool_description(tool_name)
                    print(f"\n[AI Core] 正在调用工具: {tool_name} ({tool_description}) with params: {processed_params}")
                    tool_result = await self.fastmcp_handler.call_tool(tool_name, processed_params)

                    # Record the tool result in the conversation history.
                    tool_messages.append({
                        "tool_call_id": tool_call.id,
                        "role": "tool",
                        "name": function_name,
                        "content": json.dumps(tool_result, ensure_ascii=False)
                    })

            # Ask the model to answer based on the tool results.
            messages = tool_messages + [{"role": "user", "content": "请基于以上工具调用结果，用自然语言回答最初的问题。"}]
            iteration += 1

        # Persist the exchange in memory.
        if update_memory:
            self.memory_manager.add_to_memory("user", query)
            self.memory_manager.add_to_memory("assistant", result)

        return result

    async def process_query_stream(self, query, enable_tools=True, websocket=None, use_memory=True):
        """Stream the answer to a user query, yielding tokens as they arrive.

        Tool-call progress ("tool_start" / "tool_result") is pushed over
        *websocket*; answer tokens and a final {"type": "complete"} marker
        are yielded for the caller (the websocket server) to forward.

        :param query: user input text
        :param enable_tools: whether the model may call tools
        :param websocket: object exposing `send_text` for progress messages;
                          send failures are logged and ignored
        :param use_memory: include history and record this exchange
        """
        print(f"[AI Core] 流式处理用户查询: {query}")

        messages = [{"role": "system", "content": self.system_prompt}]
        if use_memory:
            messages.extend(self.memory_manager.get_memory())
        messages.append({"role": "user", "content": query})

        max_iterations = 5  # safety cap on tool-call rounds
        iteration = 0
        final_result = ""

        while iteration < max_iterations:
            response = self.openai_client.chat.completions.create(
                model=self.config.model_name,
                messages=messages,
                tools=self.tools if enable_tools and self.tools else None,
                tool_choice="auto" if enable_tools and self.tools else None
            )

            response_message = response.choices[0].message
            messages.append(response_message)
            tool_calls = response_message.tool_calls

            if tool_calls:
                for tool_call in tool_calls:
                    function_name = tool_call.function.name
                    function_args = json.loads(tool_call.function.arguments)

                    if function_name == "call_fastmcp_tool":
                        tool_name = function_args.get("tool_name")
                        params = function_args.get("params", {})

                        # Shared normalization; the streaming path also
                        # defaults maps_weather's city.
                        processed_params = self._preprocess_tool_params(
                            tool_name, params, query, weather_default_city="北京")

                        tool_description = self.fastmcp_handler.get_tool_description(tool_name)
                        print(f"\n[AI Core] 正在调用工具: {tool_name} ({tool_description}) with params: {processed_params}")

                        # Tell the client a tool call is starting, with a
                        # friendly message derived from the description.
                        try:
                            if "获取" in tool_description:
                                friendly_message = "我准备获取相关信息..."
                            elif "查询" in tool_description:
                                friendly_message = "我准备查询相关信息..."
                            elif "搜索" in tool_description:
                                friendly_message = "我准备搜索相关信息..."
                            else:
                                friendly_message = "我准备执行相关操作..."

                            await websocket.send_text(json.dumps({
                                "type": "tool_start",
                                "tool_name": tool_name,
                                "message": friendly_message
                            }, ensure_ascii=False))
                        except Exception as e:
                            print(f"[Error] 发送工具开始提示时出错: {e}")

                        tool_result = await self.fastmcp_handler.call_tool(tool_name, processed_params)

                        # Push the raw tool result to the client immediately.
                        try:
                            await websocket.send_text(json.dumps({
                                "type": "tool_result",
                                "tool_name": tool_name,
                                "result": tool_result,
                                "message": f"已获取{tool_description}的结果，正在生成最终回答..."
                            }, ensure_ascii=False))
                        except Exception as e:
                            print(f"[Error] 发送工具结果时出错: {e}")

                        # Record the tool result in the conversation history.
                        messages.append({
                            "tool_call_id": tool_call.id,
                            "role": "tool",
                            "name": function_name,
                            "content": json.dumps(tool_result, ensure_ascii=False)
                        })

                # Ask the model to answer based on the tool results.
                messages.append({"role": "user", "content": "请基于以上工具调用结果，用自然语言回答最初的问题。"})
                iteration += 1
            else:
                # No more tool calls: stream the final answer token by token.
                completion = self.async_openai_client.chat.completions.create(
                    model=self.config.model_name,
                    messages=messages,
                    stream=True
                )

                try:
                    streaming_content = ""

                    for chunk in completion:
                        if chunk.choices:
                            delta = chunk.choices[0].delta
                            if delta.content:
                                streaming_content += delta.content
                                # Yield raw tokens only; the websocket server
                                # decides how to forward them.
                                yield delta.content

                    final_result = streaming_content
                    break
                except Exception as e:
                    print(f"[Error] 流式响应时出错: {e}")
                    # Surface the error to the caller and stop.
                    yield f"发生错误: {e}"
                    return

        # Persist the exchange in memory.
        if use_memory:
            self.memory_manager.add_to_memory("user", query)
            if final_result:
                self.memory_manager.add_to_memory("assistant", final_result)

        # Completion marker for the websocket server.
        yield {"type": "complete", "content": ""}

    async def run(self, user_input):
        """One-shot entry point: build the tool schema, then answer *user_input*.

        :param user_input: the user's question
        :return: the assistant's final text answer
        """
        # 1. Generate the tool schema for this session.
        self.tools = await self.generate_tools_schema()

        # 2. Seed the conversation.
        messages = [{"role": "system", "content": self.system_prompt}]
        messages.append({"role": "user", "content": user_input})

        # 3. Tool-call loop, bounded for consistency with process_query
        #    (previously unbounded, which risked spinning on a model that
        #    keeps requesting tools).
        response_message = None
        for _ in range(5):
            response = self.openai_client.chat.completions.create(
                model=self.config.model_name,
                messages=messages,
                tools=self.tools if self.tools else None,
                tool_choice="auto" if self.tools else None
            )

            response_message = response.choices[0].message
            messages.append(response_message)

            if not response_message.tool_calls:
                # Final answer, no (further) tool calls.
                return response_message.content

            for tool_call in response_message.tool_calls:
                function_name = tool_call.function.name
                function_args = json.loads(tool_call.function.arguments)

                if function_name == "call_fastmcp_tool":
                    tool_name = function_args.get("tool_name")
                    params = function_args.get("params", {})

                    # Shared normalization (see _preprocess_tool_params).
                    processed_params = self._preprocess_tool_params(
                        tool_name, params, user_input)

                    tool_description = self.fastmcp_handler.get_tool_description(tool_name)
                    logger.info(f"\n[AI Core] 正在调用工具: {tool_name} ({tool_description}) with params: {processed_params}")
                    tool_result = await self.fastmcp_handler.call_tool(tool_name, processed_params)
                    logger.info(f"[AI Core] 工具返回结果: {tool_result}")

                    # Record the tool result in the conversation history.
                    messages.append({
                        "tool_call_id": tool_call.id,
                        "role": "tool",
                        "name": function_name,
                        "content": json.dumps(tool_result, ensure_ascii=False)
                    })

        # Iteration budget exhausted: return the last assistant content.
        return response_message.content if response_message else None

# 工厂方法
async def create_ai_core_with_mcp(model_provider=None):
    """Build an AICore wired to all configured MCP servers.

    Falls back to a tool-less AICore when MCP initialization fails.

    :param model_provider: forwarded to the AICore constructor
    :return: a ready-to-use AICore instance
    """
    try:
        # Locate the sibling 'config' directory and load every MCP config.
        base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        mcp_loader = MCPConfigLoader(os.path.join(base_dir, 'config'))
        loaded_configs = mcp_loader.load_all_configs()
        loaded_clients = mcp_loader.initialize_clients()

        core = AICore(loaded_configs, loaded_clients, model_provider=model_provider)

        # Discover tools up front so the first query can already use them.
        core.tools = await core.generate_tools_schema()

        return core
    except Exception as e:
        print(f"[Error] 创建带MCP工具的AI核心时出错: {e}")
        # Degrade gracefully: return a core without MCP tools.
        return AICore(model_provider=model_provider)

# --- 4. 主函数 ---
async def main():
    """Demo entry point: build the core and run one sample query."""
    core = await create_ai_core_with_mcp()

    # Sample user input for the demo run.
    sample_query = "西安到上海车票"
    print(f"[用户输入] {sample_query}")

    answer = await core.run(sample_query)

    print("\n[AI Core 最终回复]")
    print(answer)

if __name__ == "__main__":
    # Script entry point: run the demo conversation once.
    asyncio.run(main())