# Purpose: Uses an LLM to decide the agent's next step or to generate a plan.
# Note: session.list_tools() may return either a plain list or a result object
# exposing a .tools attribute; both shapes are normalized before len()/iteration.

import logging
import json
import os
from typing import Optional, List, Dict, Any, Union, Sequence # Added Sequence

# Use relative imports
from ..llm_interface.base import LLMClient
from ..llm_interface.openai_client import OpenAIClient
from ..core.config import get_config_value
from ..core.exceptions import AgentError, LLMError, ConfigError, MCPClientError
from ..schemas.agent import Task, Plan, Step, AgentObservation
from ..schemas.mcp.web_automation import PageObservation

from ..llm_interface.prompt_builder import format_prompt
from ..mcp_client import get_mcp_session
try:
    from mcp import types as mcp_types
except ImportError:
    mcp_types = None

from ..core.logging_setup import get_logger
logger = get_logger(__name__)


class Planner:
    """Handles task planning and step-by-step action decision using an LLM."""

    def __init__(self, llm_client: Optional[LLMClient] = None):
        """Initializes the planner with an LLM client.

        Args:
            llm_client: Pre-configured client to use. When omitted, a default
                OpenAIClient is created (requires the OPENAI_API_KEY env var).

        Raises:
            AgentError: If no client is supplied and the default client
                cannot be initialized.
        """
        if llm_client:
            self.llm_client = llm_client
        else:
            try:
                if not os.getenv("OPENAI_API_KEY"):
                    raise ConfigError("OpenAI API key not set.")
                self.llm_client = OpenAIClient()
                logger.info("Planner initialized with default OpenAIClient.")
            except (ConfigError, ImportError, LLMError) as e:
                raise AgentError(f"LLM Client init error for Planner: {e}") from e

        # LLM parameters for planning calls, overridable via configuration.
        self.planning_model = get_config_value("agent.planning_model", "gpt-4o")
        self.planning_temp = get_config_value("agent.planning_temperature", 0.1)
        self.planning_max_tokens = get_config_value("agent.planning_max_tokens", 1500)
        # Lazily-populated cache for the formatted tool-description string.
        self.tool_descriptions_str: Optional[str] = None

    async def _load_tool_descriptions(self) -> str:
        """Loads tool descriptions dynamically from the MCP server.

        The formatted string is cached on the instance, so the MCP server is
        queried at most once per Planner instance.

        Returns:
            A newline-separated description of every available tool (name,
            description, arguments schema), or a placeholder string when the
            server reports no tools.

        Raises:
            AgentError: If the tool list cannot be retrieved from the server.
        """
        if self.tool_descriptions_str is not None:
            return self.tool_descriptions_str

        logger.info("Planner: Loading available tool descriptions from MCP server...")
        try:
            session = await get_mcp_session()
            # list_tools() may return a plain list of tool definitions or a
            # result object exposing them via a .tools attribute; handle both.
            list_tools_result = await session.list_tools()

            actual_tools: Sequence[Any]
            if isinstance(list_tools_result, list):
                actual_tools = list_tools_result
            elif isinstance(getattr(list_tools_result, 'tools', None), list):
                actual_tools = list_tools_result.tools
            else:
                logger.warning(f"MCP list_tools() returned unexpected type: {type(list_tools_result)}. Assuming no tools.")
                actual_tools = []

            logger.info(f"Planner: Received {len(actual_tools)} tools from MCP server.")

            if not actual_tools:
                self.tool_descriptions_str = "No tools available from MCP server."
                return self.tool_descriptions_str

            descriptions = []
            for tool in actual_tools:
                # getattr with defaults tolerates partially-populated tool objects.
                name = getattr(tool, 'name', 'UnknownTool')
                desc = getattr(tool, 'description', 'No description.')
                args_schema = getattr(tool, 'arguments_schema', None)
                schema_str = f"arguments_schema: {json.dumps(args_schema, indent=2)}" if args_schema else "arguments_schema: {}"
                descriptions.append(f"- tool_name: {name}\n  description: {desc}\n  {schema_str}")

            # Blank line between tools keeps the prompt easy to scan.
            self.tool_descriptions_str = "\n\n".join(descriptions)
            logger.debug(f"Formatted Tool Descriptions:\n{self.tool_descriptions_str}")
            return self.tool_descriptions_str

        except MCPClientError as e:
            logger.error(f"Planner: Failed to list tools via MCP client: {e}", exc_info=False)
            raise AgentError(f"Could not load tool descriptions: {e}") from e
        except Exception as e:
            logger.error(f"Planner: Unexpected error loading tool descriptions: {e}", exc_info=True)
            raise AgentError(f"Unexpected error loading tools: {e}") from e

    def _format_observation_for_prompt(self, observation: Optional[AgentObservation]) -> Dict[str, str]:
        """Formats the observation data into strings suitable for the LLM prompt.

        Args:
            observation: Latest observation, or None when none exists yet.

        Returns:
            Dict with keys page_url, page_title, focused_text,
            interactable_elements_string and observation_error. Values are
            always strings so the prompt template can substitute them directly.
        """
        if observation is None:
            return {
                "page_url": "N/A",
                "page_title": "N/A",
                "focused_text": "No observation performed yet or observation failed.",
                "interactable_elements_string": "N/A",
                "observation_error": "",
            }

        formatted_obs = {
            "page_url": "N/A",
            "page_title": "N/A",
            "focused_text": "Observation data not applicable.",
            "interactable_elements_string": "N/A",
            "observation_error": "",
        }
        if observation.observation_type == "web_page" and isinstance(observation.data, PageObservation):
            page_data: PageObservation = observation.data
            formatted_obs["page_url"] = str(page_data.url) if page_data.url else "N/A"
            formatted_obs["page_title"] = page_data.title or "N/A"
            formatted_obs["focused_text"] = page_data.focused_text or "No focused text content."
            elements_list_str = []
            if page_data.interactable_elements:
                for el in page_data.interactable_elements:
                    text_repr = f" Text='{el.text}'" if el.text else ""
                    # Truncate serialized attributes to keep the prompt compact.
                    attr_str = json.dumps(el.attributes)
                    attr_repr = f" Attr={attr_str[:100]}{'...' if len(attr_str) > 100 else ''}" if el.attributes else ""
                    elements_list_str.append(f"- ID: {el.agent_id}, Tag: <{el.tag_name}>{text_repr}{attr_repr}")
                formatted_obs["interactable_elements_string"] = "\n".join(elements_list_str) if elements_list_str else "No interactable elements found."
            else:
                formatted_obs["interactable_elements_string"] = "No interactable elements identified."
            if page_data.error_message:
                formatted_obs["observation_error"] = f"Note: Observation Error: {page_data.error_message}"
        elif observation.data:
            # Non-web observation types: pass through a truncated string dump.
            formatted_obs["focused_text"] = f"Obs Type: {observation.observation_type}\nData: {str(observation.data)[:1000]}..."
        return formatted_obs

    async def decide_next_step(self, task: Task, current_observation: Optional[AgentObservation], step_history: List[Step]) -> Optional[Step]:
        """Decides the single next action step for the given task.

        Args:
            task: The task being worked on.
            current_observation: Most recent observation, if any.
            step_history: Previously executed steps, most recent last.

        Returns:
            The next Step to execute (status "pending").

        Raises:
            AgentError: On tool-loading, prompt, LLM, or parsing failures.
        """
        # Step IDs are sequential; continue from the last recorded step.
        planner_step_id = (step_history[-1].id + 1) if step_history else 1
        logger.info(f"Planner: Deciding next step (ID: {planner_step_id}) for Task: {task.id}")
        try:
            action_tool_descs = await self._load_tool_descriptions()
        except AgentError as e:
            logger.error(f"Planner: Cannot decide next step: {e}")
            raise
        formatted_obs = self._format_observation_for_prompt(current_observation)
        # Include only the last 5 steps to bound prompt size.
        history_str = "\n".join(
            f"Step {s.id}: Tool={s.tool_name}, Status={s.status.upper()}, Result={repr(s.result)[:100]}..."
            for s in step_history[-5:]
        )
        prompt_context = {
            "task_description": task.description,
            "action_tool_descriptions": action_tool_descs,
            "recent_history": history_str or "No previous steps.",
            **formatted_obs,
        }
        try:
            formatted_prompt = format_prompt("planning/decide_next_action", prompt_context)
        except (ValueError, FileNotFoundError, IOError) as e:
            logger.error(f"Prompt format error: {e}")
            raise AgentError(f"Prompt error: {e}") from e
        raw_llm_response = None
        try:
            logger.info(f"Requesting next action from LLM: {self.planning_model}")
            raw_llm_response = await self.llm_client.get_completion(
                prompt=formatted_prompt,
                model=self.planning_model,
                temperature=self.planning_temp,
                max_tokens=self.planning_max_tokens,
                response_format="json_object",
            )
            logger.debug("Parsing LLM action decision response...")
            next_step = self._parse_llm_step_decision(raw_llm_response, planner_step_id)
            if next_step is None:
                raise AgentError("Failed to parse valid next action from LLM.")
            logger.info(f"LLM decided next action (Step {next_step.id}): Tool='{next_step.tool_name}'")
            return next_step
        except LLMError as e:
            logger.error(f"LLM error during planning: {e}")
            raise AgentError(f"LLM error: {e}") from e
        except (json.JSONDecodeError, ValueError, TypeError) as e:
            logger.error(f"Parse/validation error: {e}. Raw: {raw_llm_response}", exc_info=True)
            raise AgentError(f"LLM response parse error: {e}") from e
        except AgentError:
            # Already an AgentError (e.g. raised just above): propagate unchanged.
            raise
        except Exception as e:
            logger.error(f"Unexpected planning error: {e}. Raw: {raw_llm_response}", exc_info=True)
            raise AgentError(f"Unexpected planning error: {e}") from e

    def _parse_llm_step_decision(self, raw_response: Optional[str], step_id: int) -> Optional[Step]:
        """Parses the raw JSON string from the LLM into a single Step object.

        Args:
            raw_response: Raw LLM completion text; may be wrapped in markdown
                code fences (either ```json or bare ```).
            step_id: ID to assign to the resulting Step.

        Returns:
            The parsed Step, or None when the LLM returned an empty response.

        Raises:
            ValueError: If the response is not a valid action JSON object.
        """
        if not raw_response:
            logger.error("LLM returned empty response.")
            return None
        try:
            response_text = raw_response.strip()
            # Strip markdown code fences; models emit both ```json and bare ```.
            if response_text.startswith("```json"):
                response_text = response_text[7:]
            elif response_text.startswith("```"):
                response_text = response_text[3:]
            if response_text.endswith("```"):
                response_text = response_text[:-3]
            response_text = response_text.strip()
            if not response_text:
                raise ValueError("Cleaned response empty.")
            action_data = json.loads(response_text)
            if not isinstance(action_data, dict):
                raise ValueError("Not JSON object.")
            tool_name = action_data.get("tool_name")
            arguments = action_data.get("arguments")
            thought = action_data.get("thought")
            if not isinstance(tool_name, str) or not tool_name:
                raise ValueError("'tool_name' missing/invalid.")
            if not isinstance(arguments, dict):
                # Tools that take no arguments may legitimately omit them.
                if arguments is None and tool_name in ("observe_page",):
                    arguments = {}
                else:
                    raise ValueError(f"'arguments' must be dict for '{tool_name}'. Got: {type(arguments)}")
            if thought is not None and not isinstance(thought, str):
                logger.warning("Ignoring non-string 'thought'.")
                thought = None
            # Pass explicit defaults for optional Step fields.
            return Step(id=step_id, tool_name=tool_name, arguments=arguments, thought=thought, status="pending", result=None, error_message=None)
        except (json.JSONDecodeError, ValueError, TypeError) as e:
            logger.error(f"Error parsing/validating LLM step decision: {e}. Raw: {raw_response}", exc_info=True)
            raise ValueError(f"Failed to parse valid step from LLM: {e}") from e

    async def generate_initial_plan(self, task: Task) -> Plan:
        """Generates a full multi-step plan up front (not yet supported).

        Raises:
            NotImplementedError: Always; only step-by-step planning exists.
        """
        logger.warning("generate_initial_plan not implemented.")
        raise NotImplementedError("Multi-step initial planning is not implemented yet.")