"""
Core client functionality for Dolphin MCP.
"""
import copy
import json
from fastmcp import Client
from fastmcp.client.transports import PythonStdioTransport, SSETransport, StreamableHttpTransport

from mcp.types import Tool, TextContent
from typing import override, Dict, Tuple
from ..common import (
    MCPServerConfig, ModelParameter,
    get_logger    
)

from ..schemas import ModelResponse, ToolUseBlock

from .basic import BaseMCPClient

logger= get_logger(__name__)
"""
MCPClient
    function:
        - client to MCP Server and LLM
        - deal with data from MCP server
        - validate
"""
class MCPClient(BaseMCPClient):
    """Implementation for a single MCP server.

    Connects to one MCP server over the configured transport, caches the
    server's tools and prompts, and validates tool-call arguments.
    """
    # NOTE: in the original, this docstring sat *after* the attribute below and
    # therefore never became the class __doc__; it is now the first statement.

    # Active fastmcp client session; set by start(), None until then.
    client: Client | None = None

    def __init__(self,
                mcp_server: str,
                mcp_config: MCPServerConfig,
                model_param: ModelParameter):
        """Store server identity/config and initialize empty caches.

        Args:
            mcp_server: name of the MCP server this client talks to.
            mcp_config: transport/connection configuration for that server.
            model_param: parameters for the backing LLM.
        """
        super().__init__(mcp_server, mcp_config, model_param)
        self.tools = []    # filled by start() via list_tools()
        self.prompts = []  # filled by start() via list_prompts()
    @override
    async def start(self) -> bool:
        """Open a session to the MCP server and cache its tools and prompts.

        Picks a transport from the configured transportType, enters the
        client context on the shared exit stack, then fetches the server's
        tool and prompt lists.

        Returns:
            True on success, False when the transport type is unsupported.
        """
        # bug fix: annotation was `Client = None`, which is not a valid type
        # for a None initial value.
        client: Client | None = None
        # to-do: enable AsyncExitStack()
        transport_type = self.mcp_config.transportType
        if transport_type == "stdio":
            client = self.__stdio_start()
        elif transport_type == "sse":
            client = self.__sse_start()
        elif transport_type == "streamable-http":
            # streamable-http support in fastmcp, not mcp official feature at present
            client = self.__streamable_start()
        else:
            logger.error(f"error: server {self.server} has unsupported transport type {transport_type}")
            return False
        # to-do: check none tools
        self.client = await self.exit_stack.enter_async_context(client)
        self.tools = await self.client.list_tools()
        # index tools by name for O(1) lookup during argument validation
        self.dict_tools = {tool.name: tool for tool in self.tools}
        self.prompts = await self.client.list_prompts()
        return True

    def __streamable_start(self) -> Client:
        """Build a Client over a streamable-http transport.

        When the server env supplies an "authorization_key", it is forwarded
        verbatim as the Authorization header.
        """
        env = self.mcp_config.env
        auth_token = env.get("authorization_key") if env is not None else None
        headers = {"Authorization": auth_token} if auth_token is not None else None
        transport = StreamableHttpTransport(
            url=self.mcp_config.url,
            headers=headers,
        )
        return Client(transport)
    def __sse_start(self) -> Client:
        """Build a Client over an SSE transport, with optional auth header."""
        headers = None
        env = self.mcp_config.env
        if env is not None and env.get("authorization_key") is not None:
            headers = {"Authorization": env["authorization_key"]}
        return Client(SSETransport(url=self.mcp_config.url, headers=headers))
    def __stdio_start(self) -> Client:
        """Build a Client over a python stdio transport.

        to-do: function limit, need to debug...
        at present, only python supported...
        """
        options = {
            # python_cmd="/usr/bin/python3.11" could optionally pick an interpreter
            "script_path": self.mcp_config.command,  # path to the server script
            "args": self.mcp_config.args,            # arguments passed to the script
            "env": self.mcp_config.env,              # environment for the process
        }
        return Client(PythonStdioTransport(**options))
    
    @override
    def ping(self)->bool:
        # Liveness check against the connected MCP server.
        # NOTE(review): fastmcp's Client.ping is a coroutine function — if so,
        # this sync method returns an un-awaited (always-truthy) coroutine,
        # not a bool; confirm the base-class contract and whether this should
        # be `async def` with an await.
        # NOTE(review): self.client is None before start(); calling ping()
        # before start() raises AttributeError — confirm callers' ordering.
        return self.client.ping()
    
    @override
    async def cleanup(self):
        """Release every resource registered on the exit stack."""
        stack = self.exit_stack
        await stack.aclose()
    
    def check_tool_callable(self, content: ToolUseBlock) -> bool:
        """Return True when `content` carries arguments compatible with its tool.

        A tool without required parameters is callable only with no input;
        a tool with required parameters needs at least that many arguments.
        """
        tool: Tool = self.dict_tools[content.name]
        required = tool.inputSchema.get("required")
        if required is None:
            # no required params: callable iff the caller supplied no input
            return content.input is None
        if content.input is None:
            return False
        # to-do:
        #   - Optional: maybe we need to check all required arguments variable
        return len(content.input) >= len(required)
    
    def check_call_next_tool(self, tool_name: str, result: TextContent) -> Tuple[bool, Dict]:
        """Decide whether `result` can seed follow-up calls of `tool_name`.

        For the postgres-mcp server, the previous result text is parsed as a
        JSON list of rows; every row containing all of the tool's required
        keys becomes an argument dict for a follow-up call.

        Args:
            tool_name: name of the tool that produced `result`.
            result: text content returned by the previous tool call.

        Returns:
            (True, arguments) where arguments is a list of per-row argument
            dicts when at least one complete row was found; (False, None)
            otherwise. (NOTE: the second element is actually a list of dicts,
            not a single Dict as the annotation suggests.)
        """
        tool: Tool = self.dict_tools[tool_name]
        required = tool.inputSchema.get("required")
        if required is None:
            return False, None
        if self.server_name == "postgres-mcp" and isinstance(required, list):
            rows = json.loads(result.text)
            arguments = []
            needed = len(required)
            for row in rows:
                # bug fix: the original `if value:` dropped legitimate falsy
                # values (0, "", False); only a missing/None key disqualifies
                argument = {key: row[key] for key in required
                            if row.get(key) is not None}
                if len(argument) == needed:
                    arguments.append(argument)
            if arguments:
                return True, arguments
        return False, None

    async def process_query(self, query: str) -> str:
        """Process a query using the LLM provider and available tools.

        Sends the query plus this server's tool list to the LLM; when the
        model requests tool calls, executes each against the MCP server,
        feeds the results back, and asks the model for a final answer.

        Args:
            query: the user's natural-language request.

        Returns:
            A newline-joined transcript of tool invocations followed by the
            model's final text.
        """
        messages = [
            {
                "role": "user",
                "content": query
            }
        ]
        response: ModelResponse = await self.model.client.response(
            messages,
            tools=self.tools
        )
        logger.info(f"{response.provider}/{response.model}: execute LLM query successfully with created_time {response.created_at} ")
        # Process response and handle tool calls
        final_text = []
        # bug fix: hasattr() requires (obj, name) — the original
        # hasattr(response.message.tool_calls) raised TypeError
        tool_calls = getattr(response.message, "tool_calls", None)
        if tool_calls is not None:
            # bug fix: append the assistant message as a dict, not a JSON
            # string, so `messages` stays a homogeneous list of dicts
            messages.append(response.message.model_dump(exclude_none=True))
            for tc in tool_calls:
                # to_do: check arguments
                tool_name = tc.function.name
                arguments = tc.function.arguments
                result = await self.client.call_tool(tool_name, arguments)
                logger.info(f"[Calling tool={tool_name} with arguments={arguments}] with result: [{result[0].text}]")
                final_text.append(f"[Calling tool {tool_name} with args {arguments}]")
                tool_message = {
                    "role": "tool",
                    "content": result[0].text,
                    "name": tool_name,
                }
                if tc.id:
                    tool_message["tool_call_id"] = tc.id
                messages.append(tool_message)
            response = await self.model.client.response(
                    messages,
                    tools=[],
                )
            logger.info(f"{response.provider}/{response.model}: generate finally result successfully with created_time: {response.created_at}")
        # bug fix: the original discarded the model's answer entirely when no
        # tool calls were requested (returned ""); always include the final
        # message content — in the tool branch this is the follow-up response
        final_text.append(response.message.content)
        return "\n".join(final_text)