import asyncio
import base64
import io
import json
import time
from contextlib import AsyncExitStack
from typing import Any, Dict, Iterator, List, Optional

from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client
from mcp.types import CallToolResult, ImageContent
from openai import OpenAI
from PIL import Image

# Dedicated module-level event loop: the synchronous wrapper methods of
# MCPClientWrapper (connect / call_tool / generate_animations / close) all
# drive their async counterparts via loop.run_until_complete, so the MCP
# session created on this loop is always resumed on the same loop.
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)

class MCPClientWrapper:
    """Synchronous facade over an MCP stdio session plus an OpenAI-compatible LLM.

    Every public method runs its async counterpart on the module-level event
    loop via ``loop.run_until_complete`` so callers (e.g. a Gradio UI) never
    handle coroutines directly.
    """

    def __init__(self):
        self.session = None     # active mcp.ClientSession, set by _connect()
        self.exit_stack = None  # AsyncExitStack owning transport + session
        self.llm = OpenAI()     # base URL / API key come from the environment
        self.model = "DeepSeek-R1" # "deepseek-r1-250528" # "DeepSeek-V3_1" # "Qwen3-32B" # "Qwen3-235B-A22B" # "Qwen3-30B-A3B" # "deepseek-chat" # "DeepSeek-V3" # "Qwen3-4B" # "Qwen3-8B"
        self.tools = []         # OpenAI function-calling specs built from MCP tools

    def connect(self, server_path: str) -> str:
        """Synchronously connect to the MCP server script at *server_path*."""
        return loop.run_until_complete(self._connect(server_path))

    async def _connect(self, server_path: str) -> str:
        """Spawn the server process, open an MCP session and cache its tools.

        Returns a human-readable status line naming the available tools.
        """
        # Tear down any previous connection before opening a new one.
        if self.exit_stack:
            await self.exit_stack.aclose()

        self.exit_stack = AsyncExitStack()

        # Pick the interpreter from the script suffix (Python vs Node.js).
        is_python = server_path.endswith('.py')
        command = "python" if is_python else "node"

        # Server launch parameters; force UTF-8, unbuffered stdio.
        server_params = StdioServerParameters(
            command=command,
            args=[server_path],
            env={"PYTHONIOENCODING": "utf-8", "PYTHONUNBUFFERED": "1"}
        )

        # Establish the stdio transport, then the MCP session on top of it.
        stdio_transport = await self.exit_stack.enter_async_context(stdio_client(server_params))
        self.stdio, self.write = stdio_transport
        self.session = await self.exit_stack.enter_async_context(ClientSession(self.stdio, self.write))
        await self.session.initialize()

        # Convert MCP tool descriptions into OpenAI function-calling specs.
        response = await self.session.list_tools()
        self.tools = [{
            "type": "function",
            "function": {
                "name": tool.name,
                "description": tool.description,
                "parameters": tool.inputSchema
            }
        } for tool in response.tools]

        tool_names = [tool["function"]["name"] for tool in self.tools]
        return f"Connected to MCP server. Available tools: {', '.join(tool_names)}"

    def call_tool(self, tool_name: str, tool_args: Dict[str, Any]) -> CallToolResult:
        """Synchronously invoke the MCP tool *tool_name* with *tool_args*."""
        return loop.run_until_complete(self._call_tool(tool_name, tool_args))

    async def _call_tool(self, tool_name: str, tool_args: Dict[str, Any]) -> CallToolResult:
        """Invoke an MCP tool; raises RuntimeError when not connected."""
        if not self.session:
            raise RuntimeError("MCP server is not connected.")

        return await self.session.call_tool(tool_name, tool_args)

    @staticmethod
    def _decode_gif_images(content: Any) -> List[Optional[Image.Image]]:
        """Decode base64-encoded GIF payloads from a tool result's content.

        Non-image items (and images that are not ``image/gif``) become ``None``
        placeholders so positions stay aligned with *content*; a non-list
        *content* yields an empty list.
        """
        images: List[Optional[Image.Image]] = []
        if isinstance(content, list):
            for item in content:
                image = None
                if isinstance(item, ImageContent):
                    if item.type == "image" and item.mimeType == "image/gif":
                        image = Image.open(io.BytesIO(base64.b64decode(item.data)))
                images.append(image)
        return images

    def generate_animations(self, num_days: int, lat_lon: tuple[float, float], timezone_delta: float, interval: int) -> list[Image.Image | None]:
        """Synchronously call the ``generate_animations`` tool and decode its GIFs."""
        return loop.run_until_complete(self._generate_animations(num_days, lat_lon, timezone_delta, interval))

    async def _generate_animations(self, num_days: int, lat_lon: tuple[float, float], timezone_delta: float, interval: int) -> list[Image.Image | None]:
        """Call the ``generate_animations`` MCP tool and decode the GIF frames.

        Raises RuntimeError when the MCP server is not connected.
        """
        if not self.session:
            raise RuntimeError("MCP server is not connected.")

        tool_name = "generate_animations"
        tool_args = {
            "num_days": num_days,
            "lat_lon": lat_lon,
            "timezone_delta": timezone_delta,
            "interval": interval
        }
        result = await self.session.call_tool(tool_name, tool_args)

        return self._decode_gif_images(result.content)

    def query(
            self, 
            user_message: str, 
            system_message: Optional[str] = None, 
            history: Optional[List[Dict[str, Any]]] = None
    ) -> Iterator[Dict[str, Any]]:
        """Stream an LLM answer for *user_message* and drive one MCP tool call.

        Generator: it repeatedly yields the same ``result`` dict, mutated in
        place, so the UI can re-render after every chunk.  ``result["history"]``
        holds chat messages (with Gradio-style ``metadata`` entries for the
        reasoning phase and the tool call); ``result["tool_calls"]`` holds the
        parsed tool invocation and its decoded result.

        Raises RuntimeError when the model produced no tool call, since the
        weather-animation tool cannot run without model-derived arguments.
        """
        # Assemble the prompt: prior history, optional system message, user turn.
        messages = []
        
        if history:
            for msg in history:
                role, content = msg.get("role"), msg.get("content")
                
                if role in ["user", "assistant", "system"]:
                    messages.append({"role": role, "content": content})
        
        if system_message:
            messages.append({"role": "system", "content": system_message})
        
        messages.append({"role": "user", "content": user_message})
        
        result = {}
        result["history"] = messages  # running message history
        yield result

        # Kick off the streamed chat completion with the tool specs attached.
        response = self.llm.chat.completions.create(
            messages=messages, 
            model=self.model, 
            stream=True, 
            max_completion_tokens=10240, 
            temperature=0, 
            top_p=0.7, 
            extra_body={
                # "thinking": True,
                "top_k": 20,
            }, 
            frequency_penalty=1, 
            tools=self.tools
        )

        start_time = time.time()
        phase = "reasoning process"
        content = ""
        result["history"].append({
            "role": "assistant",
            "content": content,
            "metadata": {"title": "推理过程", "id": "reasoning process", "status": "pending"}
        })
        yield result

        tool_name, tool_args = "", ""

        for chunk in response:
            if len(chunk.choices) == 0:
                continue

            delta = chunk.choices[0].delta
            # t: chunk type; s: text payload (or tool-name fragment);
            # r: tool-arguments fragment.
            t, s, r = None, "", ""
            if hasattr(delta, "tool_calls") and delta.tool_calls:
                t = "tool_calls"
                for tool_call in delta.tool_calls:
                    # Tolerate both delta layouts seen across providers.
                    if hasattr(tool_call, 'function'):
                        s = tool_call.function.name
                        r = tool_call.function.arguments
                    else:
                        s = getattr(tool_call, 'name', '')
                        r = getattr(tool_call, 'arguments', '')
                    
                    # Only the first tool call per chunk is handled for now.
                    break
            elif hasattr(delta, "reasoning_content") and delta.reasoning_content:
                t, s = "reasoning_content", delta.reasoning_content
            elif delta.content:
                t, s = "content", delta.content

            flag = False
            if "content" == t:
                if "reasoning process" == phase:
                    # First answer token: freeze the reasoning bubble and
                    # start a separate reply message.
                    result["history"][-1]["content"] = content
                    flag = True
                    phase = "reply"
                    content = s
                    result["history"].append({"role": "assistant", "content": content})
                else:
                    content += s
            elif "reasoning_content" == t:
                content += s
            elif "tool_calls" == t:
                # Name/arguments arrive in fragments; accumulate them.
                if s:
                    tool_name += s
                if r:
                    tool_args += r
                continue
            else:
                continue

            if flag:
                # Mark the reasoning entry (now second-to-last) as finished.
                result["history"][-2]["metadata"]["status"] = "done"
                result["history"][-2]["metadata"]["duration"] = time.time() - start_time
            else:
                result["history"][-1]["content"] = content

            yield result

        if tool_name and tool_args:
            start_time = time.time()
            # Some models append junk after the JSON; keep the first line only.
            tool_args = tool_args.split('\n', 1)[0]
            result["history"].append({
                "role": "assistant",
                "content": f"我将使用{tool_name}工具来帮助回答你的问题。",
                "metadata": {
                    "title": f"使用工具：{tool_name}",
                    "log": f"参数：{tool_args}",
                    "status": "pending",
                    "id": f"tool_call_{tool_name}"
                }
            })
            
            result["history"].append({
                "role": "assistant",
                "content": "```json\n" + tool_args + "\n```",
                "metadata": {
                    "parent_id": f"tool_call_{tool_name}",
                    "id": f"params_{tool_name}",
                    "title": "工具参数"
                }
            })
            
            result["tool_calls"] = []
            tool_args = json.loads(tool_args)
            # The MCP tool expects lat_lon as a tuple; JSON decodes a list.
            if "lat_lon" in tool_args:
                tool_args["lat_lon"] = tuple(tool_args["lat_lon"])
            result["tool_calls"].append({"name": tool_name, "args": tool_args})
            
            yield result
        else:
            raise RuntimeError("未能通过大模型确定用户指定地点的经纬度坐标及其今天的当地时间与世界标准时间的时差，无法调用工具生成天气预报动画！")

        # Invoke the MCP tool (synchronously, via the shared event loop).
        call_tool_result = self.call_tool(tool_name, tool_args)
        
        result_content = call_tool_result.content
        if "generate_animations" == tool_name:
            result["tool_calls"][0]["result"] = self._decode_gif_images(result_content)
        else:
            result["tool_calls"][0]["result"] = result_content
        
        # Mark the tool-call entry (second-to-last, before the params entry) done.
        if result["history"] and "metadata" in result["history"][-2]:
            result["history"][-2]["metadata"]["status"] = "done"
            result["history"][-2]["metadata"]["duration"] = time.time() - start_time

        yield result

    def close(self) -> None:
        """Synchronously shut down the MCP session and its stdio transport."""
        return loop.run_until_complete(self._close())
    
    async def _close(self) -> None:
        """Close the exit stack (transport + session) and clear references."""
        if self.exit_stack:
            await self.exit_stack.aclose()
            self.exit_stack = None
            self.session = None

# end of file
