import json
import requests
from typing import Dict, List, Optional, Union, Any


class DynamicMCPClient:
    """
    An MCP client that allows the LLM to dynamically decide when to use the MCP server.

    The client talks to two HTTP services:
      * an Ollama instance (text generation), and
      * an MCP server (external context / tools).

    For each user message the LLM itself is first asked whether external
    context is needed; only if it says yes is the MCP server queried and the
    retrieved context prepended to the final generation prompt.
    """

    def __init__(
            self,
            ollama_url: str = "http://localhost:11434",
            mcp_server_url: str = "http://localhost:3000",
            model_name: str = "deepseek-r1:latest",  # llama3.2:3b
            timeout: float = 30.0
    ):
        """
        Initialize the Dynamic MCP client.

        Args:
            ollama_url: Base URL of the Ollama HTTP API.
            mcp_server_url: Base URL of the MCP server.
            model_name: Name of the Ollama model used for generation.
            timeout: Per-request timeout in seconds for every HTTP call.

        Raises:
            ConnectionError: If either service is unreachable or unhealthy.
        """
        self.ollama_url = ollama_url
        self.mcp_server_url = mcp_server_url
        self.model_name = model_name
        # Without an explicit timeout, requests can hang forever if a server
        # accepts the TCP connection but never responds.
        self.timeout = timeout

        # Validate connections
        self._validate_connections()

    def _validate_connections(self) -> None:
        """Validate connections to both Ollama and MCP server.

        Raises:
            ConnectionError: If either endpoint is unreachable or returns a
                non-200 status.
        """
        # Check Ollama connection (/api/tags is a cheap read-only endpoint)
        try:
            response = requests.get(f"{self.ollama_url}/api/tags", timeout=self.timeout)
            if response.status_code != 200:
                raise ConnectionError(f"Failed to connect to Ollama: {response.text}")
        except requests.RequestException as e:
            raise ConnectionError(f"Failed to connect to Ollama: {str(e)}")

        # Check MCP server connection
        try:
            response = requests.get(f"{self.mcp_server_url}/health", timeout=self.timeout)
            if response.status_code != 200:
                raise ConnectionError(f"Failed to connect to MCP server: {response.text}")
        except requests.RequestException as e:
            raise ConnectionError(f"Failed to connect to MCP server: {str(e)}")

    def query_ollama(
            self,
            prompt: str,
            system_prompt: Optional[str] = None,
            temperature: float = 0.7,
            stream: bool = False
    ) -> Dict[str, Any]:
        """
        Send a query to the Ollama API.

        Args:
            prompt: The user/task prompt to generate from.
            system_prompt: Optional system instruction passed as ``system``.
            temperature: Sampling temperature forwarded to the model.
            stream: Forwarded to the API. NOTE(review): this method decodes a
                single JSON body, so it assumes the non-streaming response
                shape — passing ``stream=True`` likely breaks ``.json()``;
                confirm before enabling.

        Returns:
            The decoded JSON response from Ollama (the generated text is
            under the ``"response"`` key).

        Raises:
            RuntimeError: If the API returns a non-200 status.
        """
        payload = {
            "model": self.model_name,
            "prompt": prompt,
            "temperature": temperature,
            "stream": stream
        }

        if system_prompt:
            payload["system"] = system_prompt

        response = requests.post(
            f"{self.ollama_url}/api/generate",
            json=payload,
            timeout=self.timeout
        )

        if response.status_code != 200:
            raise RuntimeError(f"Ollama API error: {response.text}")

        return response.json()

    def query_mcp_server(self, query: Dict[str, Any]) -> Dict[str, Any]:
        """
        Send a query to the MCP server.

        Args:
            query: JSON-serializable payload posted to the ``/query`` endpoint.

        Returns:
            The decoded JSON response from the MCP server.

        Raises:
            RuntimeError: If the server returns a non-200 status.
        """
        response = requests.post(
            f"{self.mcp_server_url}/query",
            json=query,
            timeout=self.timeout
        )

        if response.status_code != 200:
            raise RuntimeError(f"MCP server error: {response.text}")

        return response.json()

    def get_available_tools(self) -> List[Dict[str, Any]]:
        """
        Get the list of available tools from the MCP server.

        Best-effort: any network or decoding failure yields an empty list so
        the caller can fall back to a default tool description.
        """
        try:
            response = requests.get(f"{self.mcp_server_url}/tools", timeout=self.timeout)
            if response.status_code == 200:
                return response.json().get("tools", [])
            return []
        # ValueError covers a malformed (non-JSON) response body; a bare
        # except here would also swallow KeyboardInterrupt and real bugs.
        except (requests.RequestException, ValueError):
            return []

    def dynamic_mcp_decision(self, user_message: str) -> Dict[str, Any]:
        """
        Ask the LLM to decide if MCP context is needed and what specific tools to call.

        This is a key method that lets the LLM itself determine when to use external context.

        Args:
            user_message: The raw user query to evaluate.

        Returns:
            A dict with at least ``needs_context`` (bool) and ``reasoning``
            (str); when context is needed it may also carry ``tool`` and
            ``tool_params``. Falls back to ``needs_context: False`` when the
            LLM output cannot be parsed as JSON.
        """
        # Get available tools
        available_tools = self.get_available_tools()

        # Create a special decision prompt for the LLM.
        # .get() guards against tool entries missing "name"/"description".
        tool_descriptions = "\n".join([
            f"- {tool.get('name', 'unknown')}: {tool.get('description', '')}"
            for tool in available_tools
        ])

        decision_prompt = f"""
You are an AI assistant that needs to decide whether external context is needed to answer a user's question.

USER QUERY: {user_message}

Based on this query, you should:
1. Decide if you need additional context or information to give a good answer
2. If context is needed, specify which tool would be most helpful

Available tools:
{tool_descriptions if tool_descriptions else "- knowledge_base: Retrieve information from the knowledge base"}

Your response should be valid JSON with this format:
{{
  "needs_context": true/false,
  "reasoning": "brief explanation of your decision",
  "tool": "name of the tool to use (if needs_context is true)",
  "tool_params": {{relevant parameters for the tool (if applicable)}}
}}

JSON RESPONSE:
"""

        # Use a system prompt to enforce JSON output
        system_prompt = "You are a helpful assistant. Your task is to analyze the user query and decide if external context is needed. Respond with valid JSON only."

        # Get the LLM's decision
        response = self.query_ollama(
            prompt=decision_prompt,
            system_prompt=system_prompt,
            temperature=0.2  # Low temperature for more deterministic responses
        )

        llm_output = response.get("response", "")

        # Extract the JSON from the response
        try:
            # Sometimes the LLM might include extra text around the JSON, so we
            # take the outermost {...} span rather than parsing the whole output.
            json_start = llm_output.find("{")
            json_end = llm_output.rfind("}") + 1

            if json_start >= 0 and json_end > json_start:
                decision_json = json.loads(llm_output[json_start:json_end])
                return decision_json
            else:
                # Default to not using context if we can't parse the JSON
                return {"needs_context": False, "reasoning": "Failed to parse decision JSON"}
        except json.JSONDecodeError:
            # Default to not using context if we can't parse the JSON
            return {"needs_context": False, "reasoning": "Failed to parse decision JSON"}

    def process_with_dynamic_mcp(
            self,
            user_message: str,
            chat_history: Optional[List[Dict[str, str]]] = None,
            system_prompt: Optional[str] = None
    ) -> Dict[str, Any]:
        """
        Process a user message with dynamic MCP decision-making.

        Args:
            user_message: The user's question.
            chat_history: Optional prior turns forwarded to the MCP server.
            system_prompt: Optional system prompt for the final generation.

        Returns:
            The Ollama response dict, augmented with ``mcp_enhanced`` (bool),
            ``mcp_reasoning`` (str) and ``mcp_tool_used`` (str) metadata.
        """
        if chat_history is None:
            chat_history = []

        # Let the LLM decide if we need to access the MCP server
        decision = self.dynamic_mcp_decision(user_message)

        needs_mcp = decision.get("needs_context", False)
        prompt_to_use = user_message
        mcp_used = False
        mcp_reasoning = decision.get("reasoning", "")
        tool_used = decision.get("tool", "")

        if needs_mcp:
            # Prepare query for MCP server
            mcp_query = {
                "query": user_message,
                "history": chat_history,
                "model": self.model_name,
                "tool": tool_used,
                "tool_params": decision.get("tool_params", {})
            }

            # Get data from MCP server
            mcp_response = self.query_mcp_server(mcp_query)

            # Enhance the prompt with MCP context
            context = mcp_response.get("context", "")

            if context:
                prompt_to_use = self._format_prompt_with_mcp_context(
                    user_message,
                    context
                )

                # Only flag MCP usage when context actually made it into the prompt
                mcp_used = True

        # Query Ollama with the possibly enhanced prompt
        response = self.query_ollama(
            prompt=prompt_to_use,
            system_prompt=system_prompt
        )

        # Update with MCP metadata
        response["mcp_enhanced"] = mcp_used
        response["mcp_reasoning"] = mcp_reasoning
        response["mcp_tool_used"] = tool_used if mcp_used else ""

        return response

    def _format_prompt_with_mcp_context(self, original_query: str, context: str) -> str:
        """
        Format the prompt with additional context from the MCP server.

        Args:
            original_query: The user's original question.
            context: Context text retrieved from the MCP server.

        Returns:
            A prompt that presents the context first, then the query.
        """
        return f"""Context information:
{context}

Based on the above context, please respond to the following query:
{original_query}"""


def main():
    """Example usage of the DynamicMCPClient.

    Builds a client against the default local endpoints, runs one example
    query through the dynamic-MCP pipeline, and prints the answer together
    with the LLM's context-use decision.
    """
    # The constructor probes both servers and raises ConnectionError when one
    # is down; fail with a readable message instead of a traceback.
    try:
        client = DynamicMCPClient()
    except ConnectionError as e:
        print(f"Startup failed: {e}")
        return

    # Example query
    user_message = "What are the key features of the Model Context Protocol?"

    # Process with dynamic MCP
    response = client.process_with_dynamic_mcp(user_message)

    # Print the response
    print("\nResponse:")
    print(response.get("response", "No response generated"))

    # Print MCP metadata: whether the LLM chose to pull in context, and why.
    if response.get("mcp_enhanced", False):
        print("\nMCP context was used.")
        print(f"Reasoning: {response.get('mcp_reasoning', '')}")
        print(f"Tool used: {response.get('mcp_tool_used', '')}")
    else:
        print("\nNo MCP context was used.")
        print(f"Reasoning: {response.get('mcp_reasoning', '')}")


# Run the demo only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()
