"""
LangGraph workflow definition for attribution analysis.
"""
from typing import Any, Dict, Literal, Optional

import pandas as pd
from langgraph.graph import StateGraph, END

from .state import AttributionState
from .nodes import (
    understand_query,
    fetch_data,
    detect_anomalies,
    perform_drill_down,
    decompose_metric,
    perform_attribution_reasoning,
    generate_report
)


def create_attribution_graph() -> Any:
    """
    Create and compile the attribution analysis LangGraph workflow.

    Workflow:
    1. understand_query: Parse user's natural language query
    2. fetch_data: Load data for analysis
    3. detect_anomalies: Identify anomalies in the metric
    4. perform_drill_down: Drill down by dimensions (conditional, may loop)
    5. decompose_metric: Decompose complex metrics
    6. perform_attribution_reasoning: Synthesize findings with LLM
    7. generate_report: Create final report

    Returns:
        The compiled graph. Note: ``StateGraph.compile()`` returns a
        compiled-graph object (not a ``StateGraph``), which exposes
        ``invoke``/``stream`` as used by ``AttributionAgent``.
    """
    # Build the state graph over the shared AttributionState schema.
    workflow = StateGraph(AttributionState)

    # Register processing nodes.
    workflow.add_node("understand", understand_query)
    workflow.add_node("fetch_data", fetch_data)
    workflow.add_node("detect_anomalies", detect_anomalies)
    workflow.add_node("drill_down", perform_drill_down)
    workflow.add_node("decompose", decompose_metric)
    workflow.add_node("reasoning", perform_attribution_reasoning)
    workflow.add_node("report", generate_report)

    # Execution starts by parsing the user's query.
    workflow.set_entry_point("understand")

    # Linear edges for the initial pipeline stages.
    workflow.add_edge("understand", "fetch_data")
    workflow.add_edge("fetch_data", "detect_anomalies")

    # Branch: drill down only when anomalies were found; otherwise go
    # straight to metric decomposition.
    workflow.add_conditional_edges(
        "detect_anomalies",
        should_drill_down,
        {
            "drill_down": "drill_down",
            "decompose": "decompose"
        }
    )

    # Branch: drill_down may loop back into itself for deeper levels,
    # driven by the 'should_continue_drill' flag set by the node.
    workflow.add_conditional_edges(
        "drill_down",
        should_continue_drilling,
        {
            "drill_down": "drill_down",  # Loop back for deeper drill-down
            "reasoning": "reasoning"      # Move to reasoning
        }
    )

    # Both branches converge on reasoning, then the final report.
    workflow.add_edge("decompose", "reasoning")
    workflow.add_edge("reasoning", "report")
    workflow.add_edge("report", END)

    # Compile into an executable graph.
    return workflow.compile()


def should_drill_down(state: Dict[str, Any]) -> Literal["drill_down", "decompose"]:
    """
    Routing function: decide whether to perform drill-down analysis.

    Args:
        state: Current graph state; reads the 'anomalies' entry.

    Returns:
        "drill_down" if any anomalies were detected, otherwise "decompose".
    """
    # Truthiness covers a missing key, None, and an empty list alike,
    # so the previous `anomalies and len(anomalies) > 0` double-check
    # is unnecessary.
    if state.get('anomalies'):
        return "drill_down"
    return "decompose"


def should_continue_drilling(state: Dict[str, Any]) -> Literal["drill_down", "reasoning"]:
    """
    Routing function: loop back into drill-down or move on to reasoning.

    The drill-down node is expected to set the 'should_continue_drill'
    flag (presumably accounting for max depth and contributor
    significance — the flag is the only thing read here).

    Args:
        state: Current graph state; reads the 'should_continue_drill' flag.

    Returns:
        "drill_down" to go one level deeper, "reasoning" to synthesize.
    """
    return "drill_down" if state.get('should_continue_drill', False) else "reasoning"


class AttributionAgent:
    """
    Attribution Analysis Agent.

    Main interface for running attribution analysis: wraps the compiled
    LangGraph workflow and exposes synchronous and streaming entry points
    plus accessors for the pieces of the final state.
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """
        Initialize the Attribution Agent.

        Args:
            config: Configuration dictionary with optional settings:
                - llm_model: LLM model to use (default: 'gpt-4')
                - temperature: LLM temperature (default: 0.1)
                - max_drill_depth: Maximum drill-down depth (default: 3)
                - min_contribution_threshold: Minimum contribution % (default: 0.05)
                - anomaly_threshold: Z-score threshold (default: 2.0)
                - top_n_contributors: Number of top contributors (default: 5)
                - enable_llm_reasoning: Use LLM for reasoning (default: True)
        """
        self.config = config or {}
        self.graph = create_attribution_graph()

    def _build_initial_state(
        self,
        query: str,
        data: pd.DataFrame,
        config_override: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """
        Merge configs and build the initial graph state.

        Shared by analyze() and stream_analyze() so the two entry points
        cannot drift apart.

        Args:
            query: Natural language query describing the analysis
            data: DataFrame with data to analyze
            config_override: Optional config to override defaults

        Returns:
            Initial state dictionary for graph execution.
        """
        # Copy so overrides never mutate the agent-level config.
        run_config = {**self.config}
        if config_override:
            run_config.update(config_override)

        return {
            'query': query,
            'data': data,
            'config': run_config,
            'current_drill_level': 0,
            'max_drill_depth': run_config.get('max_drill_depth', 3),
            'llm_responses': []
        }

    def analyze(
        self,
        query: str,
        data: pd.DataFrame,
        config_override: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """
        Run attribution analysis.

        Args:
            query: Natural language query describing the analysis
            data: DataFrame with data to analyze
            config_override: Optional config to override defaults

        Returns:
            Final state dictionary with analysis results
        """
        initial_state = self._build_initial_state(query, data, config_override)
        return self.graph.invoke(initial_state)

    def stream_analyze(
        self,
        query: str,
        data: pd.DataFrame,
        config_override: Optional[Dict[str, Any]] = None
    ):
        """
        Run attribution analysis with streaming output.

        Args:
            query: Natural language query describing the analysis
            data: DataFrame with data to analyze
            config_override: Optional config to override defaults

        Yields:
            State updates as the analysis progresses
        """
        initial_state = self._build_initial_state(query, data, config_override)
        yield from self.graph.stream(initial_state)

    def get_report(self, final_state: Dict[str, Any]) -> str:
        """
        Extract the final report from analysis results.

        Args:
            final_state: Final state from analyze()

        Returns:
            Report text, or a placeholder if no report was generated.
        """
        return final_state.get('report', 'No report generated.')

    def get_visualizations(self, final_state: Dict[str, Any]) -> list:
        """
        Extract visualizations from analysis results.

        Args:
            final_state: Final state from analyze()

        Returns:
            List of visualization specs (empty if none were produced).
        """
        return final_state.get('visualizations', [])

    def get_root_causes(self, final_state: Dict[str, Any]) -> list:
        """
        Extract root causes from analysis results.

        Args:
            final_state: Final state from analyze()

        Returns:
            List of root causes (empty if none were identified).
        """
        return final_state.get('root_causes', [])
