import asyncio
import os
import json
from contextlib import AsyncExitStack
from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client
from mcp.client.sse import sse_client
from openai import AsyncOpenAI
from dotenv import load_dotenv
from typing import List, Dict, Any, Callable, Optional, Union, AsyncGenerator
from langchain_core.messages import HumanMessage, AIMessage, ToolMessage

load_dotenv()  # Pull OPENAI_API_KEY / OPENAI_BASE_URL / OPENAI_MODEL from a local .env, if present.

# Server definitions are expected next to this module in mcp_servers.json.
CONFIG_PATH = os.path.join(os.path.dirname(__file__), 'mcp_servers.json')

class MCPClient:
    """Client that multiplexes several MCP servers behind one OpenAI chat loop.

    Servers are read from a JSON config file; each entry is either
    ``"type": "local"`` (stdio subprocess) or ``"type": "sse"`` (HTTP SSE).
    Tools from each server are advertised to the model under a
    ``{server_key}_`` name prefix so calls can be routed back to the
    owning session.
    """

    def __init__(self, config_path=CONFIG_PATH):
        # Maps server key (from the config file) -> initialized ClientSession.
        self.sessions = {}
        # Owns every transport/session context so cleanup() can close them all.
        self.exit_stack = AsyncExitStack()
        self.config = self.load_config(config_path)
        api_key = os.environ.get("OPENAI_API_KEY")
        base_url = os.environ.get("OPENAI_BASE_URL")
        self.openai = AsyncOpenAI(api_key=api_key, base_url=base_url)
        self.connected = False
        self.conversation_history = []  # Running transcript: user/assistant/tool messages.

    def load_config(self, path):
        """Load and return the JSON server configuration at *path*."""
        with open(path, "r", encoding="utf-8") as f:
            return json.load(f)

    async def connect_all(self):
        """Connect to every configured server, skipping any that fail.

        Raises:
            Exception: if not a single server could be connected.
        """
        for key, conf in self.config.items():
            try:
                if conf["type"] == "local":
                    server_params = StdioServerParameters(
                        command=conf["command"],
                        args=conf["args"],
                        env=None
                    )
                    try:
                        stdio_transport = await self.exit_stack.enter_async_context(stdio_client(server_params))
                        session = await self.exit_stack.enter_async_context(ClientSession(*stdio_transport))
                        await session.initialize()
                        self.sessions[key] = session
                    except Exception as e:
                        print(f"Failed to connect to local server {key}: {str(e)}")
                        continue

                elif conf["type"] == "sse":
                    try:
                        sse_transport = await self.exit_stack.enter_async_context(sse_client(conf["url"]))
                        session = await self.exit_stack.enter_async_context(ClientSession(*sse_transport))
                        await session.initialize()
                        self.sessions[key] = session
                    except Exception as e:
                        print(f"Failed to connect to SSE server {key} at {conf['url']}: {str(e)}")
                        continue
            except Exception as e:
                # Malformed config entry (missing keys etc.) -- skip it.
                print(f"Error processing server configuration {key}: {str(e)}")
                continue

        if not self.sessions:
            raise Exception("Failed to connect to any servers")
        self.connected = True

    async def list_tools(self):
        """Collect the tools of every connected session.

        Each tool's name is prefixed with its server key so call_tool() can
        route it back.  NOTE: this mutates the tool objects returned by the
        session in place.
        """
        tools = []
        for key, session in self.sessions.items():
            for tool in (await session.list_tools()).tools:
                tool.name = f"{key}_{tool.name}"
                tools.append(tool)
        return tools

    async def call_tool(self, tool_name, args):
        """Route a prefixed tool name back to its owning session and invoke it.

        Matches the LONGEST server-key prefix so a key that happens to be a
        prefix of another key (e.g. "fs" and "fs_remote") cannot shadow it.

        Raises:
            Exception: if no server-key prefix matches *tool_name*.
        """
        best_key = None
        for key in self.sessions:
            if tool_name.startswith(f"{key}_") and (best_key is None or len(key) > len(best_key)):
                best_key = key
        if best_key is None:
            raise Exception("Unknown tool prefix")
        return await self.sessions[best_key].call_tool(tool_name[len(best_key) + 1:], args)

    def _record_tool_message(self, langchain_messages, tool_name, content, tool_call_id):
        """Append one tool-result message to both the persistent history and
        the message list for the current LLM exchange."""
        tool_message = {
            "role": "tool",
            "name": tool_name,
            "content": content,
            "tool_call_id": tool_call_id
        }
        self.conversation_history.append(tool_message)
        langchain_messages.append(tool_message)

    async def process_query(self, query: str) -> AsyncGenerator[Dict[str, Any], None]:
        """Stream the model's answer to *query*, executing tool calls as needed.

        Yields dicts keyed by "type":
          - "content": a text delta from the model
          - "tool_call": the model requested a tool (name only)
          - "tool_result": a tool ran successfully (raw MCP result content)
          - "tool_error": argument parsing or execution failed
          - "error": the whole exchange failed
        """
        # Record the user turn first; it stays in history even if the call fails.
        user_message = {"role": "user", "content": query}
        self.conversation_history.append(user_message)

        # Advertise every connected server's tools in OpenAI function format.
        tools = await self.list_tools()
        if not tools:
            openai_tools = None
        else:
            openai_tools = [
                {
                    "type": "function",
                    "function": {
                        "name": tool.name,
                        "description": tool.description,
                        "parameters": tool.inputSchema
                    }
                } for tool in tools
            ]

        # Instruction on how to format URLs/media found in tool results.
        tool_result_instruction = """当工具返回的响应包含URL或链接时，请根据其正确的内容类型进行适当格式化：

例如：
- 视频应使用<video>标签
- 音频应使用<audio>标签
- 图像应使用![描述](URL)或<img>标签
- 文档和网页链接应使用[描述](URL)格式

根据URL扩展名或内容类型信息选择合适的显示格式。这将提供最佳的用户体验。

请记住，正确格式化的媒体将增强用户体验，特别是当内容与回答查询直接相关时。"""

        # System prompt driving tool-use behavior.
        system_prompt = """你是一个智能助手，可以调用外部工具来回答问题。
请根据用户问题，决定是否需要调用工具。如果需要，请使用可用的工具来获取更多信息。
如果不需要工具，请直接回答用户的问题。每一次必须先使用工具获取当前的日期"""

        # Build the full message list: system prompts plus the whole history.
        langchain_messages = [
            {"role": "system", "content": tool_result_instruction},
            {"role": "system", "content": system_prompt},
        ]
        langchain_messages.extend(self.conversation_history)

        # Loop: call the model, run any requested tools, feed results back,
        # repeat until the model answers without tool calls.
        continue_tool_calls = True
        while continue_tool_calls:
            try:
                request_kwargs = {
                    "model": os.environ.get("OPENAI_MODEL", "/workspace/qwen3"),
                    "max_tokens": 5000,
                    "messages": langchain_messages,
                    "stream": True  # Enable streaming
                }
                # The API rejects tool_choice when no tools are supplied, so
                # only attach both when at least one tool is available.
                if openai_tools:
                    request_kwargs["tools"] = openai_tools
                    request_kwargs["tool_choice"] = "auto"
                stream = await self.openai.chat.completions.create(**request_kwargs)

                response_content = ""
                tool_calls = []
                async for chunk in stream:
                    # Some backends emit keep-alive chunks with no choices.
                    if not chunk.choices:
                        continue
                    delta = chunk.choices[0].delta
                    if delta.content:
                        response_content += delta.content
                        yield {"type": "content", "content": delta.content}

                    if delta.tool_calls:
                        for tool_call in delta.tool_calls:
                            # Indices may arrive with gaps: grow the list until
                            # the slot exists (a single append after an `if`
                            # check would mis-align entries on a gap).
                            while len(tool_calls) <= tool_call.index:
                                tool_calls.append({
                                    "id": "",
                                    "type": "function",
                                    "function": {
                                        "name": "",
                                        "arguments": ""
                                    }
                                })
                            entry = tool_calls[tool_call.index]
                            # The id is only present on the first delta of each call.
                            if tool_call.id:
                                entry["id"] = tool_call.id
                            if tool_call.function and tool_call.function.name:
                                entry["function"]["name"] = tool_call.function.name
                                yield {"type": "tool_call", "name": tool_call.function.name}
                            if tool_call.function and tool_call.function.arguments:
                                entry["function"]["arguments"] += tool_call.function.arguments

                # Reassemble the complete assistant message.
                response_message = {
                    "role": "assistant",
                    "content": response_content,
                }
                if tool_calls:
                    response_message["tool_calls"] = tool_calls

                # Persist it in both the history and the current exchange.
                self.conversation_history.append(response_message)
                langchain_messages.append(response_message)

                if not tool_calls:
                    # No tool calls: the answer is complete.
                    continue_tool_calls = False
                    continue

                # Execute each requested tool and feed its result back.
                for tool_call in tool_calls:
                    tool_name = tool_call["function"]["name"]
                    tool_args_str = tool_call["function"]["arguments"]
                    if not tool_args_str:
                        error_content = "工具调用参数为空，无法解析。"
                        self._record_tool_message(langchain_messages, tool_name, error_content, tool_call["id"])
                        yield {"type": "tool_error", "name": tool_name, "error": error_content}
                        continue

                    try:
                        tool_args = json.loads(tool_args_str)
                    except Exception as e:
                        error_content = f"参数解析失败: {str(e)}"
                        self._record_tool_message(langchain_messages, tool_name, error_content, tool_call["id"])
                        yield {"type": "tool_error", "name": tool_name, "error": error_content}
                        continue

                    try:
                        # Log the tool invocation to the console.
                        print(f"\n[调用工具 {tool_name} 参数]: {tool_args_str}", end="", flush=True)

                        result = await self.call_tool(tool_name, tool_args)
                        # NOTE(review): result.content is the raw MCP content
                        # payload (may be a list of content parts, not a str).
                        tool_output_content = result.content

                        # Surface the tool result to the caller.
                        yield {
                            "type": "tool_result",
                            "name": tool_name,
                            "content": tool_output_content
                        }

                        self._record_tool_message(langchain_messages, tool_name, tool_output_content, tool_call["id"])
                    except Exception as e:
                        error_content = f"工具执行失败: {str(e)}"
                        yield {"type": "tool_error", "name": tool_name, "error": error_content}
                        self._record_tool_message(langchain_messages, tool_name, error_content, tool_call["id"])

                # Loop again so the model can react to the tool results.
                continue_tool_calls = True

            except Exception as e:
                error_msg = f"处理请求失败: {str(e)}"
                yield {"type": "error", "content": error_msg}
                continue_tool_calls = False

    async def cleanup(self):
        """Close every transport/session and reset client state."""
        try:
            await self.exit_stack.aclose()
        except Exception as e:
            print(f"Error during cleanup: {str(e)}")
        finally:
            self.conversation_history = []
            self.connected = False

async def main():
    """Interactive REPL: connect to all configured MCP servers, then stream
    answers to user queries until 'exit'/'quit', EOF, or Ctrl-C."""
    client = MCPClient()
    print("正在连接所有MCP服务...")
    try:
        await client.connect_all()
    except Exception as e:
        print(f"连接失败: {str(e)}")
        await client.cleanup()
        return

    print("已连接服务。输入你的问题，输入 exit 或 quit 退出。\n")
    try:
        while True:
            try:
                query = input("你: ").strip()
            except (EOFError, KeyboardInterrupt):
                # Ctrl-D (or piped-input EOF) and Ctrl-C at the prompt end the
                # session gracefully instead of crashing with a traceback.
                print("再见！")
                break
            if query.lower() in ("exit", "quit"):
                print("再见！")
                break
            if not query:
                continue
            try:
                print("AI: ", end="", flush=True)
                async for chunk in client.process_query(query):
                    if chunk["type"] == "content":
                        print(chunk["content"], end="", flush=True)
                    elif chunk["type"] == "tool_call":
                        print(f"\n[调用工具: {chunk['name']}]", end="", flush=True)
                    elif chunk["type"] == "tool_result":
                        content = chunk['content']
                        cont = str(content)
                        # Truncate very long tool output for console display.
                        if len(cont) > 200:
                            cont = cont[:200] + "...内容过长，已省略"
                        print(f"\n[工具返回: {chunk['name']}]\n{cont}", end="", flush=True)
                    elif chunk["type"] == "tool_error":
                        print(f"\n[工具错误: {chunk['name']}: {chunk['error']}]", end="", flush=True)
                    elif chunk["type"] == "error":
                        print(f"\n[错误]: {chunk['content']}", end="", flush=True)
                print("\n")
            except Exception as e:
                print(f"[错误] {str(e)}\n")
    finally:
        # Always release transports/sessions, whatever ended the loop.
        await client.cleanup()

# Script entry point: run the interactive client until the user exits.
if __name__ == "__main__":
    asyncio.run(main())