# Purpose: Analyzes results/errors and suggests corrections using LLM.
# Changes:
# - Added 'os' and 'json' imports.
# - Removed direct call to get_available_tool_descriptions.
# - Modified reflect_on_step_error to accept tool_descriptions string as argument.

import logging
import json # Added import
import os   # Added import
from typing import Optional, List, Dict, Any

# Use relative imports
from ..llm_interface.base import LLMClient
from ..llm_interface.openai_client import OpenAIClient
from ..core.config import get_config_value
from ..core.exceptions import AgentError, LLMError, ConfigError
from .agent_types import Task, Plan, Step, AgentObservation, AgentContext

# Import prompt formatting utility
from ..llm_interface.prompt_builder import format_prompt

# Use core logger setup
from ..core.logging_setup import get_logger
logger = get_logger(__name__)


class Reflector:
    """Analyzes execution results and suggests corrections using an LLM.

    Produces a natural-language "insight" explaining why a plan step failed,
    which the agent can feed back into replanning.
    """

    def __init__(self, llm_client: Optional[LLMClient] = None):
        """Initializes the reflector with an LLM client.

        Args:
            llm_client: Pre-configured client to use. If omitted, a default
                OpenAIClient is created (requires OPENAI_API_KEY in the env).

        Raises:
            AgentError: If the default client cannot be configured or created.
        """
        if llm_client:
            self.llm_client = llm_client
        else:
            try:
                # Fail fast with a clear message instead of letting the client
                # constructor raise something less specific.
                if not os.getenv("OPENAI_API_KEY"):
                    raise ConfigError("Cannot initialize default OpenAIClient: OPENAI_API_KEY not set.")
                self.llm_client = OpenAIClient()
                logger.info("Reflector initialized with default OpenAIClient.")
            except (ConfigError, ImportError, LLMError) as e:
                logger.error(f"Failed to initialize default LLMClient for Reflector: {e}")
                raise AgentError(f"LLM Client configuration/initialization error for Reflector: {e}") from e

        # Reflection LLM parameters, overridable via agent config.
        self.reflection_model = get_config_value("agent.reflection_model", "gpt-4o")
        self.reflection_temp = get_config_value("agent.reflection_temperature", 0.4)
        self.reflection_max_tokens = get_config_value("agent.reflection_max_tokens", 1000)

    async def reflect_on_step_error(
        self,
        context: AgentContext,
        failed_step: Step,
        available_tools_desc: str  # Tool descriptions supplied by the caller
    ) -> Optional[str]:
        """Uses LLM to reflect on why a step failed.

        Args:
            context: Current agent context (task, step history, observation).
            failed_step: The step whose execution failed.
            available_tools_desc: Human-readable descriptions of available tools.

        Returns:
            The reflection insight text; an error-description string when the
            LLM call itself fails; or None when the prompt cannot be formatted
            or the LLM returns an empty response.
        """
        logger.warning(
            f"Reflector: Reflecting on failed Step {failed_step.id} "
            f"(Tool: {failed_step.tool_name}, Error: {failed_step.error_message})"
        )

        # --- Prepare Prompt Context ---
        try:
            # Safely serialize arguments
            args_str = json.dumps(failed_step.arguments)
        except TypeError:
            args_str = repr(failed_step.arguments)  # Fallback if args not JSON serializable

        obs_summary = "N/A"
        observation = context.current_observation
        if observation:
            obs_summary = observation.summary or f"Observation Type: {observation.observation_type}"
            # Surface an observation-level error message when the data object
            # carries one (getattr default covers both missing and falsy).
            obs_error = getattr(observation.data, "error_message", None)
            if obs_error:
                obs_summary += f" (Observation Error: {obs_error})"

        prompt_context = {
            "task_description": context.task.description,
            "failed_step_id": failed_step.id,
            "failed_tool": failed_step.tool_name,
            "failed_args": args_str,
            "error_message": failed_step.error_message or "No error message provided.",
            # Only the last 5 steps, to keep the prompt bounded.
            "step_history": "\n".join(
                f"Step {s.id}: Tool={s.tool_name}, Status={s.status.upper()}"
                for s in context.step_history[-5:]
            ),
            "current_observation_summary": obs_summary,
            "available_tools": available_tools_desc,  # Use passed descriptions
        }

        # --- Format Prompt ---
        try:
            formatted_prompt = format_prompt("reflection/analyze_error_and_correct", prompt_context)
            # logger.debug(f"Formatted reflection prompt:\n{formatted_prompt}")
        except (ValueError, FileNotFoundError, IOError) as e:
            logger.error(f"Failed to format reflection prompt: {e}")
            return None  # Cannot reflect without a prompt

        # --- Call LLM ---
        raw_llm_response = None
        try:
            logger.info(f"Requesting reflection from LLM: {self.reflection_model}")
            raw_llm_response = await self.llm_client.get_completion(
                prompt=formatted_prompt,
                model=self.reflection_model,
                temperature=self.reflection_temp,
                max_tokens=self.reflection_max_tokens,
                response_format="text",
            )

            if raw_llm_response:
                logger.info("Reflector: Received reflection insight.")
                logger.debug(f"Reflection insight: {raw_llm_response[:500]}...")
                return raw_llm_response.strip()

            logger.warning("Reflector: LLM returned empty response for reflection.")
            return None

        except LLMError as e:
            logger.error(f"LLM error during reflection: {e}")
            # Return the error text so the agent still receives a usable insight.
            return f"Reflection failed due to LLM error: {e}"
        except Exception as e:
            logger.error(f"Unexpected error during reflection: {e}", exc_info=True)
            logger.error(f"Raw LLM Response (if available):\n{raw_llm_response}")
            return f"Unexpected error during reflection: {e}"