#!/usr/bin/env python3

import argparse
import asyncio
import json
import logging
import os
import sys
import threading
import time
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, Optional

import yaml
from dotenv import load_dotenv
from langchain_core.messages import AIMessage, HumanMessage
from langchain_core.tools import tool
from langchain_mcp_adapters.client import MultiServerMCPClient
from langgraph.errors import GraphRecursionError

from .agent_state import AgentState
from .graph_builder import build_multi_agent_graph

# Configure logging with basicConfig
# (runs at import time; every logger in the process inherits this setup)
logging.basicConfig(
    level=logging.INFO,  # Set the log level to INFO
    # Define log message format
    format="%(asctime)s,p%(process)s,{%(filename)s:%(lineno)d},%(levelname)s,%(message)s",
)

# Suppress HTTP request logs from showing in console
http_loggers = [
    "httpx",
    "httpcore",
    "streamable_http",
    "mcp.client.streamable_http",
    # 'anthropic._client',
    # 'anthropic._base_client'
]

# Raise the threshold on the chatty HTTP client loggers listed above.
for logger_name in http_loggers:
    http_logger = logging.getLogger(logger_name)
    http_logger.setLevel(logging.WARNING)  # Only show warnings and errors in console

# Module-level logger used throughout this file.
logger = logging.getLogger(__name__)

# Load environment variables from .env file in sre_agent directory
load_dotenv(Path(__file__).parent / ".env")


class Spinner:
    """Animated console spinner that optionally shows elapsed seconds.

    Runs on a daemon thread and redraws in place via carriage returns.
    Usable directly (start()/stop()) or as a context manager.
    """

    def __init__(self, message: str = "Thinking", show_time: bool = True):
        # Public configuration
        self.message = message
        self.show_time = show_time
        # Animation state
        self.spinning = False
        self.thread = None
        self.start_time = None
        self.spinner_chars = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.stop()

    def start(self):
        """Begin drawing the spinner on a background daemon thread."""
        self.spinning = True
        self.start_time = time.time()
        worker = threading.Thread(target=self._spin)
        worker.daemon = True  # never block interpreter shutdown
        self.thread = worker
        worker.start()

    def stop(self):
        """Halt the animation and erase the spinner line."""
        if not self.spinning:
            return
        self.spinning = False
        if self.thread:
            self.thread.join()
        # Overwrite the spinner line with spaces, then park the cursor at col 0
        sys.stdout.write("\r" + " " * 50 + "\r")
        sys.stdout.flush()

    def _spin(self):
        """Worker loop: redraw one frame every 0.1s until stopped."""
        frame = 0
        while self.spinning:
            elapsed = time.time() - self.start_time
            time_str = f" ({elapsed:.1f}s)" if self.show_time else ""
            glyph = self.spinner_chars[frame % len(self.spinner_chars)]
            sys.stdout.write(f"\r{glyph} {self.message}{time_str}")
            sys.stdout.flush()
            time.sleep(0.1)
            frame += 1


def _save_final_response_to_markdown(
    query: str,
    final_response: str,
    timestamp: Optional[datetime] = None,
    output_dir: str = ".",
    filename_prefix: str = "sre_investigation",
) -> str:
    """Write the final agent response to a timestamped markdown report.

    Args:
        query: The user query; also used to derive the report filename.
        final_response: Markdown body of the investigation result.
        timestamp: Report time; defaults to now.
        output_dir: Directory for the report (created if missing).
        filename_prefix: Currently unused; kept for interface stability.

    Returns:
        str: Path of the written file, or "" if writing failed.
    """
    if timestamp is None:
        timestamp = datetime.now()

    # Make sure the destination directory exists.
    report_dir = Path(output_dir)
    report_dir.mkdir(parents=True, exist_ok=True)

    # Build a filesystem-safe slug from the query.
    # Map filename-hostile separators to underscores in a single pass.
    separator_map = str.maketrans({ch: "_" for ch in " /\\?:,."})
    slug = query.translate(separator_map)
    # Drop anything that is not alphanumeric, underscore, or hyphen.
    slug = "".join(ch for ch in slug if ch.isalnum() or ch in "_-")
    # Collapse runs of underscores and trim them from both ends.
    slug = "_".join(piece for piece in slug.split("_") if piece)
    # Cap length so filenames stay manageable.
    slug = slug[:80]
    # Fall back to a generic name when the slug is empty or too short.
    if len(slug) < 3:
        slug = "query"

    stamp = timestamp.strftime("%Y%m%d_%H%M%S")
    filepath = report_dir / f"{slug}_{stamp}.md"

    # Assemble the markdown document.
    report_body = f"""# SRE Investigation Report

**Generated:** {timestamp.strftime("%Y-%m-%d %H:%M:%S")}

**Query:** {query}

---

{final_response}

---
*Report generated by SRE Multi-Agent Assistant*
"""

    try:
        with open(filepath, "w", encoding="utf-8") as f:
            f.write(report_body)

        logger.info(f"Final response saved to: {filepath}")
        return str(filepath)

    except Exception as e:
        # Best-effort: log and surface the failure, but never raise.
        logger.error(f"Failed to save final response to markdown: {e}")
        print(f"❌ Failed to save report: {e}")
        return ""


@tool
def get_current_time() -> str:
    """Get current date and time in ISO format.

    This tool provides the current timestamp which is essential for debugging
    time-sensitive issues and correlating events across different systems.

    Returns:
        str: Current datetime in ISO format (YYYY-MM-DDTHH:MM:SS)
    """
    # Docstring is kept verbatim: LangChain surfaces it as the tool
    # description shown to the model.
    now = datetime.now()
    return now.isoformat()


def _get_anthropic_api_key() -> str:
    """Fetch the Anthropic API key from the environment.

    Returns:
        str: Value of the ANTHROPIC_API_KEY environment variable.

    Raises:
        ValueError: If the variable is unset or empty.
    """
    key = os.getenv("ANTHROPIC_API_KEY")
    if key:
        return key
    raise ValueError(
        "ANTHROPIC_API_KEY environment variable is required for Anthropic provider"
    )


def _read_gateway_config() -> tuple[str, str]:
    """Resolve the MCP gateway endpoint and access credentials.

    The URI comes from config/agent_config.yaml (key gateway.uri); the
    bearer token comes from the GATEWAY_ACCESS_TOKEN environment variable.

    Returns:
        tuple[str, str]: (gateway URI without trailing slash, access token).

    Raises:
        ValueError: When the URI is missing from config or the token is unset.
    """
    try:
        # Refresh environment from the package-local .env file first.
        load_dotenv(Path(__file__).parent / ".env")

        config_path = Path(__file__).parent / "config" / "agent_config.yaml"
        with open(config_path, "r") as f:
            config = yaml.safe_load(f)

        uri = config.get("gateway", {}).get("uri")
        if not uri:
            raise ValueError(
                "Gateway URI not found in agent_config.yaml under 'gateway.uri'"
            )

        token = os.getenv("GATEWAY_ACCESS_TOKEN")
        if not token:
            raise ValueError("GATEWAY_ACCESS_TOKEN environment variable is required")

        return uri.rstrip("/"), token
    except Exception as e:
        # Log with context, then let the caller decide how to handle it.
        logger.error(f"Error reading gateway configuration: {e}")
        raise


def create_mcp_client() -> MultiServerMCPClient:
    """Build an MCP client pointed at the configured gateway.

    Reads the gateway URI and bearer token via _read_gateway_config and
    registers a single streamable-HTTP server named "gateway".

    Returns:
        MultiServerMCPClient: Client configured for the gateway endpoint.
    """
    uri, token = _read_gateway_config()

    # Single named server entry; auth is passed as a bearer header.
    gateway_server = {
        "url": f"{uri}/mcp",
        "transport": "streamable_http",
        "headers": {"Authorization": f"Bearer {token}"},
    }
    return MultiServerMCPClient({"gateway": gateway_server})


async def create_multi_agent_system(
    provider: str = "anthropic", checkpointer=None, **llm_kwargs
):
    """Create the multi-agent graph wired up with MCP and local tools.

    Args:
        provider: LLM provider name ("anthropic" or "bedrock").
        checkpointer: Accepted for interface compatibility; not used here.
        **llm_kwargs: Extra keyword arguments forwarded to the graph builder.

    Returns:
        tuple: (compiled multi-agent graph, list of all tools available).
    """
    logger.info(f"Creating multi-agent system with provider: {provider}")

    # Resolve the Anthropic API key from the environment when the caller
    # did not supply one explicitly.
    if provider == "anthropic" and not llm_kwargs.get("api_key"):
        llm_kwargs["api_key"] = _get_anthropic_api_key()

    # Fetch tools from the MCP gateway; degrade gracefully to local-only
    # tools if the gateway is unreachable.
    mcp_tools = []
    try:
        client = create_mcp_client()
        all_mcp_tools = await client.get_tools()

        # Don't filter out x-amz-agentcore-search as it's a global tool
        mcp_tools = all_mcp_tools

        logger.info(f"Retrieved {len(mcp_tools)} tools from MCP")

        # Print tool information.
        # Fix: loop variable renamed from `tool`, which shadowed the
        # imported langchain `tool` decorator.
        print(f"\nMCP tools loaded: {len(mcp_tools)}")
        for mcp_tool in mcp_tools:
            tool_name = getattr(mcp_tool, "name", "unknown")
            tool_desc = getattr(mcp_tool, "description", "No description")
            print(f"  - {tool_name}: {tool_desc[:80]}...")

    except Exception as e:
        logger.warning(f"Failed to load MCP tools: {e}")
        mcp_tools = []

    # Combine local tools with MCP tools
    local_tools = [get_current_time]
    all_tools = local_tools + mcp_tools

    print(f"\nAdditional local tools: {len(local_tools)}")
    for local_tool in local_tools:
        # Show only the first line of the (possibly multi-line) description
        description = (
            local_tool.description.split("\n")[0].strip()
            if local_tool.description
            else "No description"
        )
        print(f"  - {local_tool.name}: {description}")

    # Build the multi-agent graph
    graph = build_multi_agent_graph(
        tools=all_tools, llm_provider=provider, **llm_kwargs
    )

    return graph, all_tools


def _save_conversation_state(
    messages: list,
    state: Dict[str, Any],
    filename: str = ".multi_agent_conversation_state.json",
):
    """Persist conversation messages and state to a JSON file.

    Messages are converted with the first applicable strategy:
    ``model_dump()`` (pydantic v2-style), legacy ``dict()``, a
    ``{role, content}`` mapping for objects exposing ``.content``, or
    ``str()`` as a last resort. Non-JSON-serializable state values are
    stringified. Failures are logged, never raised.

    Args:
        messages: Message objects (or anything coercible to one of the above).
        state: Arbitrary per-session state; a "messages" key is skipped.
        filename: Destination path for the JSON snapshot.
    """
    try:
        # Convert messages to a JSON-serializable form.
        serializable_messages = []
        for msg in messages:
            if hasattr(msg, "model_dump"):
                serializable_messages.append(msg.model_dump())
            elif hasattr(msg, "dict"):
                serializable_messages.append(msg.dict())
            elif hasattr(msg, "content"):
                serializable_messages.append(
                    {"role": getattr(msg, "role", "unknown"), "content": msg.content}
                )
            else:
                serializable_messages.append(str(msg))

        # Keep only JSON-friendly state values; stringify everything else.
        serializable_state = {}
        if isinstance(state, dict):
            for k, v in state.items():
                if k == "messages":
                    continue  # Already handled above
                elif isinstance(v, (str, int, float, bool, list, dict, type(None))):
                    serializable_state[k] = v
                else:
                    serializable_state[k] = str(v)

        with open(filename, "w") as f:
            json.dump(
                {
                    "messages": serializable_messages,
                    "state": serializable_state,
                    "timestamp": datetime.now().isoformat(),
                },
                f,
                indent=2,
            )
        # Bug fix: this log line previously printed the literal placeholder
        # "(unknown)" instead of the destination filename.
        logger.debug(f"Saved conversation state to {filename}")
    except Exception as e:
        logger.error(f"Failed to save conversation state: {e}")


def _load_conversation_state(
    filename: str = ".multi_agent_conversation_state.json",
) -> tuple[Optional[list], Optional[Dict[str, Any]]]:
    """Load conversation state previously written by _save_conversation_state.

    Args:
        filename: Path of the JSON state file to read.

    Returns:
        tuple: (messages, state) from the file, or (None, None) when the
        file does not exist or cannot be read/parsed.
    """
    try:
        if Path(filename).exists():
            with open(filename, "r") as f:
                data = json.load(f)
                # Bug fix: this log line previously printed the literal
                # placeholder "(unknown)" instead of the actual filename.
                logger.info(f"Loaded conversation state from {filename}")
                return data.get("messages", []), data.get("state", {})
    except Exception as e:
        logger.error(f"Failed to load conversation state: {e}")
    return None, None


async def _run_interactive_session(
    provider: str,
    save_state: bool = True,
    output_dir: str = "./reports",
    save_markdown: bool = True,
):
    """Run an interactive multi-turn conversation session.

    Args:
        provider: LLM provider name forwarded to create_multi_agent_system.
        save_state: Auto-load prior state at startup and auto-save after
            each turn (and on /exit).
        output_dir: Directory where /savereport writes markdown reports.
        save_markdown: When True, buffer each final response so /savereport
            can persist it; reports are never written automatically here.
    """
    # Buffer to store last query and response for /savereport command
    last_query = None
    last_response = None
    # Track the original query for report naming (resets after each /savereport)
    original_query = None
    # NOTE(review): last_query is buffered and cleared below, but /savereport
    # keys off original_query — last_query is currently otherwise unused.
    print("\n🤖 Starting interactive multi-agent SRE assistant...")
    print("Commands:")
    print("  /exit or /quit - End the session")
    print("  /clear - Clear conversation history")
    print("  /save - Save conversation state")
    print("  /load - Load previous conversation state")
    print("  /savereport - Save the last query's investigation report")
    print("  /history - Show conversation history")
    print("  /agents - Show available agents")
    print("  /help - Show this help message")
    print("\nNote: Investigation reports are not saved automatically in interactive mode.")
    print("      Use /savereport to save the last query's report when needed.")
    print("\n" + "=" * 80 + "\n")

    # Load previous conversation if exists
    saved_messages, saved_state = None, None
    if save_state:
        saved_messages, saved_state = _load_conversation_state()

    # Create multi-agent system
    graph, all_tools = await create_multi_agent_system(provider)

    # Initialize conversation state
    messages = []
    if saved_messages:
        # Convert saved messages to LangChain format
        for msg in saved_messages:
            if isinstance(msg, dict):
                if msg.get("role") == "user":
                    messages.append(HumanMessage(content=msg.get("content", "")))
                elif msg.get("role") == "assistant":
                    messages.append(AIMessage(content=msg.get("content", "")))

    # Main REPL loop: dispatch slash-commands, otherwise run the graph.
    while True:
        try:
            # Get user input
            user_input = input("\n👤 You: ").strip()

            # Handle commands
            if user_input.lower() in ["/exit", "/quit"]:
                print("\n👋 Goodbye!")
                if save_state and messages:
                    _save_conversation_state(messages, {})
                break

            elif user_input.lower() == "/clear":
                messages = []
                last_query = None
                last_response = None
                original_query = None
                print("✨ Conversation history and report buffer cleared.")
                continue

            elif user_input.lower() == "/save":
                _save_conversation_state(messages, {})
                print("💾 Conversation state saved.")
                continue

            elif user_input.lower() == "/load":
                loaded_messages, loaded_state = _load_conversation_state()
                if loaded_messages is not None:
                    # Rebuild LangChain message objects from the saved dicts.
                    messages = []
                    for msg in loaded_messages:
                        if isinstance(msg, dict):
                            if msg.get("role") == "user":
                                messages.append(
                                    HumanMessage(content=msg.get("content", ""))
                                )
                            elif msg.get("role") == "assistant":
                                messages.append(
                                    AIMessage(content=msg.get("content", ""))
                                )
                    print("📂 Previous conversation loaded.")
                else:
                    print("❌ No saved conversation found.")
                continue

            elif user_input.lower() == "/savereport":
                # Persist the buffered final response under the first query
                # of the investigation (original_query), then reset buffers.
                if original_query and last_response:
                    filepath = _save_final_response_to_markdown(
                        original_query,
                        last_response,
                        output_dir=output_dir,
                    )
                    if filepath:
                        print(f"📄 Investigation report saved to: {filepath}")
                        # Clear the buffer after saving and reset for next investigation
                        last_query = None
                        last_response = None
                        original_query = None
                    else:
                        print("❌ Failed to save report")
                else:
                    print("❌ No investigation report available to save. Complete a query first.")
                continue

            elif user_input.lower() == "/history":
                print("\n📜 Conversation History:")
                for msg in messages:
                    if hasattr(msg, "content"):
                        # Derive a role label from the message class name,
                        # e.g. HumanMessage -> "human".
                        role = type(msg).__name__.replace("Message", "").lower()
                        content = msg.content
                        print(
                            f"{role.upper()}: {content[:100]}..."
                            if len(content) > 100
                            else f"{role.upper()}: {content}"
                        )
                continue

            elif user_input.lower() == "/agents":
                print("\n🤝 Available Agents:")
                print("  1. Supervisor Agent - Orchestrates and routes queries")
                print(
                    "  2. Kubernetes Infrastructure Agent - K8s operations and monitoring"
                )
                print("  3. Application Logs Agent - Log analysis and searching")
                print(
                    "  4. Performance Metrics Agent - Performance and resource metrics"
                )
                print(
                    "  5. Operational Runbooks Agent - Procedures and troubleshooting guides"
                )
                continue

            elif user_input.lower() == "/help":
                print("\n🤖 SRE Multi-Agent Assistant Help")
                print("=" * 50)
                print("\nCommands:")
                print("  /exit or /quit - End the session")
                print("  /clear - Clear conversation history")
                print("  /save - Save conversation state")
                print("  /load - Load previous conversation state")
                print("  /savereport - Save the last query's investigation report")
                print("  /history - Show conversation history")
                print("  /agents - Show available agents")
                print("  /help - Show this help message")
                print("\nReport Saving:")
                print("  • Investigation reports are NOT saved automatically in interactive mode")
                print("  • Use /savereport after completing a query to save its report")
                print("  • Reports are saved as markdown files with descriptive names")
                print("  • Use /save to save conversation state separately")
                print("\nTips:")
                print("  • Ask specific questions about infrastructure, logs, metrics, or procedures")
                print("  • The agents will collaborate to provide comprehensive answers")
                print("  • You can continue conversations and ask follow-up questions")
                continue

            if not user_input:
                continue

            # Track original query for report naming (only set if not already set)
            if original_query is None:
                original_query = user_input

            # Process with multi-agent system
            print("\n🤖 Multi-Agent System: Processing...\n")

            # Add user message
            messages.append(HumanMessage(content=user_input))

            # Create initial state
            initial_state: AgentState = {
                "messages": messages,
                "next": "supervisor",
                "agent_results": {},
                "current_query": user_input,
                "metadata": {},
                "requires_collaboration": False,
                "agents_invoked": [],
                "final_response": None,
            }

            # Stream the graph execution
            try:
                # Start initial spinner for supervisor
                # NOTE(review): spinner is assigned inside this try; if the
                # Spinner constructor itself raised, the "if spinner" checks
                # in the handlers below would NameError — low risk, but worth
                # confirming.
                spinner = Spinner("🧭 Supervisor analyzing query")
                spinner.start()

                async for event in graph.astream(initial_state):
                    # Stop spinner when we get an event
                    if spinner:
                        spinner.stop()
                        spinner = None

                    # Print progress updates
                    for node_name, node_output in event.items():
                        if node_name == "supervisor":
                            next_agent = node_output.get("next", "unknown")
                            metadata = node_output.get("metadata", {})
                            reasoning = metadata.get("routing_reasoning", "")

                            # Display investigation plan only once when first created
                            if metadata.get("plan_pending_approval"):
                                plan_text = metadata.get("plan_text", "")
                                if plan_text:
                                    print(f"\n📋 {plan_text}")
                            elif metadata.get("show_plan") and not metadata.get(
                                "plan_shown"
                            ):
                                plan_text = metadata.get("plan_text", "")
                                if plan_text:
                                    print(f"\n📋 {plan_text}")
                                # Mark plan as shown to avoid repetition
                                metadata["plan_shown"] = True

                            if next_agent != "FINISH":
                                print(f"🧭 Supervisor: Routing to {next_agent}")
                                if reasoning:
                                    print(f"   Reasoning: {reasoning}")
                                # Start spinner for next agent
                                agent_display = next_agent.replace("_", " ").title()
                                spinner = Spinner(f"🤖 {agent_display} thinking")
                                spinner.start()
                            elif metadata.get("plan_pending_approval"):
                                print("🧭 Supervisor: Plan created, awaiting approval")

                        elif node_name in [
                            "kubernetes_agent",
                            "logs_agent",
                            "metrics_agent",
                            "runbooks_agent",
                        ]:
                            agent_name = node_name.replace("_agent", "").title()
                            print(f"\n🔧 {agent_name} Agent:")

                            # Extract and display tool traces from metadata
                            metadata = node_output.get("metadata", {})
                            # Look for traces using various possible key formats
                            agent_messages = []
                            for key, value in metadata.items():
                                if "_trace" in key and isinstance(value, list):
                                    agent_messages = value
                                    break

                            # Show debug info about trace messages found
                            print(
                                f"   🔍 DEBUG: agent_messages = {len(agent_messages) if agent_messages else 0}"
                            )
                            if agent_messages:
                                print(
                                    f"   📋 Found {len(agent_messages)} trace messages:"
                                )
                                for i, msg in enumerate(agent_messages):
                                    msg_type = type(msg).__name__
                                    if hasattr(msg, "content"):
                                        content_preview = str(
                                            msg.content
                                        )  # Show full content
                                    else:
                                        content_preview = "No content"
                                    print(f"      {i+1}. {msg_type}: {content_preview}")
                                    if hasattr(msg, "tool_calls") and msg.tool_calls:
                                        print(
                                            f"         Tool calls: {len(msg.tool_calls)}"
                                        )
                                    if hasattr(msg, "tool_call_id"):
                                        print(
                                            f"         Tool response for: {getattr(msg, 'tool_call_id', 'unknown')}"
                                        )
                            else:
                                print("   ⚠️  No trace messages found in metadata")

                            # Display tool calls and results like in langgraph_agent.py
                            for msg in agent_messages:
                                if hasattr(msg, "tool_calls") and msg.tool_calls:
                                    print("   📞 Calling tools:")
                                    for tc in msg.tool_calls:
                                        tool_name = tc.get("name", "unknown")
                                        tool_args = tc.get("args", {})
                                        tool_id = tc.get("id", "unknown")
                                        print(f"      {tool_name}(")
                                        if tool_args:
                                            for (
                                                arg_name,
                                                arg_value,
                                            ) in tool_args.items():
                                                # Show full values
                                                value_str = repr(arg_value)
                                                print(f"        {arg_name}={value_str}")
                                        print(f"      ) [id: {tool_id}]")

                                elif hasattr(msg, "tool_call_id"):
                                    # This is a tool response
                                    tool_name = getattr(msg, "name", "unknown_tool")
                                    tool_call_id = getattr(
                                        msg, "tool_call_id", "unknown"
                                    )
                                    result_content = msg.content

                                    print(f"   🛠️  {tool_name} [id: {tool_call_id}]:")
                                    if isinstance(result_content, str):
                                        try:
                                            parsed_result = json.loads(result_content)
                                            # Pretty print full output
                                            formatted = json.dumps(
                                                parsed_result, indent=2
                                            )
                                            lines = formatted.split("\n")
                                            for line in lines:
                                                print(f"      {line}")
                                        except:
                                            # Not JSON, print full string
                                            lines = result_content.split("\n")
                                            for line in lines:
                                                print(f"      {line}")

                            # Show agent's full final response
                            agent_results = node_output.get("agent_results", {})
                            for agent_key, result in agent_results.items():
                                # Loose match between result key and node name,
                                # e.g. "kubernetes" vs "kubernetes_agent".
                                if (
                                    agent_key in node_name
                                    or node_name.replace("_agent", "")
                                    in agent_key.lower()
                                ):
                                    if result:
                                        print("   💡 Full Response:")
                                        print(f"      {result}")

                        elif node_name == "aggregate":
                            final_response = node_output.get("final_response", "")
                            if final_response:
                                print(f"\n💬 Final Response:\n{final_response}")
                                # Add assistant message to history
                                messages.append(AIMessage(content=final_response))
                                # Store for /savereport command instead of auto-saving
                                if save_markdown:
                                    last_query = user_input
                                    last_response = final_response
                                    print("\n💡 Use /savereport to save this investigation report.")

            except GraphRecursionError:
                if spinner:
                    spinner.stop()
                print(
                    "\n❌ Error: Maximum recursion limit reached. The agents may be stuck in a loop."
                )
                print("💡 Tip: Try rephrasing your question or being more specific.")
            except Exception as e:
                if spinner:
                    spinner.stop()
                logger.error(f"Error in multi-agent execution: {e}")
                print(f"\n❌ Error: {e}")
            finally:
                # Always clean up spinner
                if spinner:
                    spinner.stop()

            # Auto-save after each turn if enabled
            if save_state:
                _save_conversation_state(messages, {})

        except KeyboardInterrupt:
            # Ctrl-C aborts the current turn but keeps the session alive.
            print("\n\n⚠️  Interrupted. Type /exit to quit.")
            continue
        except Exception as e:
            logger.error(f"Error in conversation: {e}")
            print(f"\n❌ Error: {e}")


def _build_arg_parser() -> argparse.ArgumentParser:
    """Build the command-line argument parser for the multi-agent CLI."""
    parser = argparse.ArgumentParser(
        description="Multi-agent SRE assistant with specialized agents"
    )
    parser.add_argument(
        "--provider",
        choices=["bedrock", "anthropic"],
        default="anthropic",
        help="Model provider to use (default: anthropic)",
    )
    parser.add_argument(
        "--prompt",
        help="Single prompt to send to the multi-agent system (if not provided, starts interactive mode)",
    )
    parser.add_argument(
        "--interactive",
        "-i",
        action="store_true",
        help="Start interactive multi-turn conversation mode",
    )
    parser.add_argument(
        "--no-save",
        action="store_true",
        help="Disable automatic conversation state saving in interactive mode",
    )
    parser.add_argument(
        "--output-dir",
        default="./reports",
        help="Directory to save investigation reports (default: ./reports)",
    )
    parser.add_argument(
        "--no-markdown",
        action="store_true",
        help="Disable saving final responses to markdown files",
    )
    return parser


def _print_supervisor_update(node_output: Dict[str, Any]) -> Optional["Spinner"]:
    """Render a supervisor node event to the console.

    Prints the investigation plan (once), the routing decision, and its
    reasoning.  When the supervisor routes to another agent, a spinner for
    that agent is started and returned so the caller can stop it on the
    next event; otherwise ``None`` is returned.

    Args:
        node_output: The state delta emitted by the supervisor node.

    Returns:
        A started ``Spinner`` for the next agent, or ``None`` when the
        supervisor finished (``next == "FINISH"``).
    """
    next_agent = node_output.get("next", "unknown")
    metadata = node_output.get("metadata", {})
    reasoning = metadata.get("routing_reasoning", "")

    # Display investigation plan only once when first created.
    if metadata.get("plan_pending_approval"):
        plan_text = metadata.get("plan_text", "")
        if plan_text:
            print(f"\n📋 {plan_text}")
    elif metadata.get("show_plan") and not metadata.get("plan_shown"):
        plan_text = metadata.get("plan_text", "")
        if plan_text:
            print(f"\n📋 {plan_text}")
        # Mark plan as shown to avoid repetition on later supervisor turns.
        metadata["plan_shown"] = True

    if next_agent != "FINISH":
        print(f"🧭 Supervisor: Routing to {next_agent}")
        if reasoning:
            print(f"   Reasoning: {reasoning}")
        # Start a spinner while the routed agent works.
        agent_display = next_agent.replace("_", " ").title()
        spinner = Spinner(f"🤖 {agent_display} thinking")
        spinner.start()
        return spinner

    if metadata.get("plan_pending_approval"):
        print("🧭 Supervisor: Plan created, awaiting approval")
    return None


def _print_agent_update(node_name: str, node_output: Dict[str, Any]) -> None:
    """Render a specialist-agent node event: tool traces, calls, and results.

    Args:
        node_name: Graph node name, e.g. ``"kubernetes_agent"``.
        node_output: The state delta emitted by that agent node.
    """
    agent_name = node_name.replace("_agent", "").title()
    print(f"\n🔧 {agent_name} Agent:")

    # Extract tool traces from metadata; the trace key format varies per
    # agent, so match any list-valued key containing "_trace".
    metadata = node_output.get("metadata", {})
    agent_messages: list = []
    for key, value in metadata.items():
        if "_trace" in key and isinstance(value, list):
            agent_messages = value
            break

    if agent_messages:
        print(f"   📋 Found {len(agent_messages)} trace messages:")
        for i, msg in enumerate(agent_messages):
            msg_type = type(msg).__name__
            # Show the full message content, not a truncated preview.
            content_preview = (
                str(msg.content) if hasattr(msg, "content") else "No content"
            )
            print(f"      {i+1}. {msg_type}: {content_preview}")
            if hasattr(msg, "tool_calls") and msg.tool_calls:
                print(f"         Tool calls: {len(msg.tool_calls)}")
            if hasattr(msg, "tool_call_id"):
                print(
                    f"         Tool response for: {getattr(msg, 'tool_call_id', 'unknown')}"
                )
    else:
        print("   ⚠️  No trace messages found in metadata")

    # Display tool calls and results like in langgraph_agent.py.
    for msg in agent_messages:
        if hasattr(msg, "tool_calls") and msg.tool_calls:
            print("   📞 Calling tools:")
            for tc in msg.tool_calls:
                tool_name = tc.get("name", "unknown")
                tool_args = tc.get("args", {})
                tool_id = tc.get("id", "unknown")
                print(f"      {tool_name}(")
                if tool_args:
                    for arg_name, arg_value in tool_args.items():
                        # Show full argument values.
                        print(f"        {arg_name}={arg_value!r}")
                print(f"      ) [id: {tool_id}]")

        elif hasattr(msg, "tool_call_id"):
            # This is a tool response message.
            tool_name = getattr(msg, "name", "unknown_tool")
            tool_call_id = getattr(msg, "tool_call_id", "unknown")
            result_content = msg.content

            print(f"   🛠️  {tool_name} [id: {tool_call_id}]:")
            if isinstance(result_content, str):
                try:
                    # Pretty-print JSON tool output when possible.
                    formatted = json.dumps(json.loads(result_content), indent=2)
                except json.JSONDecodeError:
                    # Not JSON; print the raw string verbatim.
                    formatted = result_content
                for line in formatted.split("\n"):
                    print(f"      {line}")

    # Show the agent's full final response, matched loosely by node name.
    agent_results = node_output.get("agent_results", {})
    for agent_key, result in agent_results.items():
        if (
            agent_key in node_name
            or node_name.replace("_agent", "") in agent_key.lower()
        ):
            if result:
                print("   💡 Full Response:")
                print(f"      {result}")


async def _run_single_prompt(args: argparse.Namespace) -> None:
    """Run one prompt through the multi-agent graph and stream the output.

    Builds the graph, seeds the initial state with the user's prompt, then
    streams node events to the console.  The final aggregated response is
    optionally saved as a markdown report.

    Args:
        args: Parsed CLI arguments (``provider``, ``prompt``, ``no_markdown``,
            ``output_dir``).
    """
    graph, _ = await create_multi_agent_system(args.provider)
    logger.info("Multi-agent system created successfully")

    # Seed the graph with the user's prompt; routing starts at the supervisor.
    initial_state: AgentState = {
        "messages": [HumanMessage(content=args.prompt)],
        "next": "supervisor",
        "agent_results": {},
        "current_query": args.prompt,
        "metadata": {},
        "requires_collaboration": False,
        "agents_invoked": [],
        "final_response": None,
    }

    print("🤖 Multi-Agent System:\n")

    # Spinner shown until the first event arrives; supervisor events may
    # hand back a fresh spinner for the agent they route to.
    spinner: Optional[Spinner] = Spinner("🧭 Supervisor analyzing query")
    spinner.start()

    try:
        async for event in graph.astream(initial_state):
            # Stop the current spinner as soon as output is available.
            if spinner:
                spinner.stop()
                spinner = None

            for node_name, node_output in event.items():
                if node_name == "supervisor":
                    spinner = _print_supervisor_update(node_output)
                elif node_name in [
                    "kubernetes_agent",
                    "logs_agent",
                    "metrics_agent",
                    "runbooks_agent",
                ]:
                    _print_agent_update(node_name, node_output)
                elif node_name == "aggregate":
                    final_response = node_output.get("final_response", "")
                    if final_response:
                        print(f"\n💬 Final Response:\n{final_response}")
                        # Auto-save the report in single-query mode.
                        if not args.no_markdown:
                            _save_final_response_to_markdown(
                                args.prompt,
                                final_response,
                                output_dir=args.output_dir,
                            )
    finally:
        # Always clean up any spinner still running (e.g. on error).
        if spinner:
            spinner.stop()


async def main():
    """Main function for control flow.

    Parses CLI arguments and dispatches to interactive mode (default when
    no ``--prompt`` is given) or single-prompt mode.

    Raises:
        Exception: Any error from the multi-agent system is logged and
            re-raised so the process exits non-zero.
    """
    args = _build_arg_parser().parse_args()

    logger.info("Starting multi-agent system with provider: %s", args.provider)

    try:
        if args.interactive or not args.prompt:
            await _run_interactive_session(
                provider=args.provider,
                save_state=not args.no_save,
                output_dir=args.output_dir,
                save_markdown=not args.no_markdown,
            )
        else:
            await _run_single_prompt(args)
    except Exception as e:
        logger.error("Error in multi-agent system: %s", e)
        raise


if __name__ == "__main__":
    # Script entry point: run the async main() coroutine to completion on a
    # fresh event loop (asyncio.run manages loop creation and teardown).
    asyncio.run(main())
