"""
Attribution reasoning node - uses LLM to synthesize analysis results.
"""
from typing import Dict, Any
from langchain_core.prompts import ChatPromptTemplate
import json
from ..tools.llm_factory import create_llm


def perform_attribution_reasoning(state: Dict[str, Any]) -> Dict[str, Any]:
    """
    Perform attribution reasoning by synthesizing all analysis results.

    Uses LLM to:
    1. Analyze anomalies
    2. Interpret drill-down results
    3. Explain metric decomposition
    4. Identify root causes

    Args:
        state: Current state dict. Reads 'config', 'target_metric',
            'time_range', 'baseline_period' and the analysis-result keys
            consumed by _prepare_analysis_summary.

    Returns:
        Updated state dict with 'attribution_result', 'root_causes',
        'llm_responses' (LLM path only) and 'next_action' set.
    """
    config = state.get('config', {})
    enable_llm = config.get('enable_llm_reasoning', True)

    if not enable_llm:
        # Skip LLM reasoning. Mirror the keys the LLM path sets
        # ('attribution_result' AND 'root_causes') so downstream nodes
        # see a consistent state shape regardless of branch taken.
        state['attribution_result'] = _generate_simple_attribution(state)
        state['root_causes'] = state['attribution_result'].get('root_causes', [])
        state['next_action'] = 'generate_report'
        return state

    # Prepare analysis summary
    analysis_summary = _prepare_analysis_summary(state)

    # Create LLM using factory (supports OpenRouter and OpenAI)
    llm = create_llm(config=config)

    # Create prompt (double braces escape literal JSON braces in the template)
    prompt = ChatPromptTemplate.from_messages([
        ("system", """You are an expert data analyst specializing in root cause analysis and attribution.

Analyze the provided data analysis results and identify the root causes of metric changes.

Provide your analysis in JSON format with these fields:
{{
    "root_causes": [
        {{
            "cause": "brief description of root cause",
            "evidence": "supporting evidence from the data",
            "impact": "estimated impact (high/medium/low)",
            "confidence": 0.0-1.0
        }}
    ],
    "summary": "2-3 sentence summary of findings",
    "recommendations": ["actionable recommendation 1", "recommendation 2"]
}}

Focus on:
1. Identifying specific dimension values or segments driving changes
2. Quantifying the impact of each factor
3. Distinguishing correlation from causation
4. Providing actionable insights
"""),
        ("user", """Analysis Results:

Target Metric: {target_metric}
Time Range: {time_range}
Baseline Period: {baseline_period}

{analysis_content}

Please analyze these results and identify the root causes.""")
    ])

    # Invoke LLM
    chain = prompt | llm
    response = chain.invoke({
        "target_metric": state.get('target_metric', 'unknown'),
        "time_range": str(state.get('time_range', 'unknown')),
        "baseline_period": str(state.get('baseline_period', 'unknown')),
        "analysis_content": analysis_summary
    })

    # Parse response. LLMs frequently wrap JSON in Markdown code fences
    # (```json ... ```), which would make a raw json.loads fail and
    # needlessly discard a valid answer — strip fences first.
    try:
        result = json.loads(_extract_json_text(response.content))
        state['attribution_result'] = result
        state['root_causes'] = result.get('root_causes', [])
    except (TypeError, ValueError):
        # Covers json.JSONDecodeError (a ValueError subclass) and
        # non-string content. Fallback to simple attribution.
        state['attribution_result'] = _generate_simple_attribution(state)
        state['root_causes'] = state['attribution_result'].get('root_causes', [])

    # Store raw LLM response for traceability
    if 'llm_responses' not in state:
        state['llm_responses'] = []
    state['llm_responses'].append(response.content)

    state['next_action'] = 'generate_report'
    return state


def _extract_json_text(raw: Any) -> str:
    """
    Return the JSON payload of an LLM reply with Markdown code fences removed.

    Handles '```json\\n...\\n```', bare '```\\n...\\n```', and unfenced text.
    Non-string input is coerced with str() so the caller's json.loads can
    raise a normal parse error instead of an AttributeError.
    """
    text = raw.strip() if isinstance(raw, str) else str(raw)
    if text.startswith("```"):
        # Drop the opening fence line (``` or ```json etc.)
        text = text.split("\n", 1)[1] if "\n" in text else ""
        stripped = text.rstrip()
        if stripped.endswith("```"):
            text = stripped[:-3]
    return text.strip()


def _prepare_analysis_summary(state: Dict[str, Any]) -> str:
    """
    Build a human-readable digest of all analysis stages for the LLM prompt.

    Args:
        state: State dict; reads 'anomalies', 'drill_down_path' and
            'decomposition' if present.

    Returns:
        Markdown-style summary string, or a fixed placeholder sentence when
        no analysis results are available.
    """
    lines = []

    # --- Anomaly section (top 5 only) ---
    detected = state.get('anomalies', [])
    if detected:
        lines.append("## Anomalies Detected")
        lines.append(f"Found {len(detected)} anomalies:")
        for idx, item in enumerate(detected[:5], start=1):
            lines.append(
                f"{idx}. {item.get('timestamp', 'N/A')}: "
                f"Actual={item.get('actual_value', 0):.2f}, "
                f"Expected={item.get('expected_value', 0):.2f}, "
                f"Deviation={item.get('deviation_percentage', 0):.1f}%, "
                f"Severity={item.get('severity', 'unknown')}"
            )

    # --- Drill-down section (top 3 contributors per level) ---
    path = state.get('drill_down_path', [])
    if path:
        lines.append("\n## Drill-Down Analysis")
        for depth, node in enumerate(path):
            lines.append(f"\nLevel {depth} - {node.get('dimension', 'unknown')}:")
            for entry in node.get('top_contributors', [])[:3]:
                lines.append(
                    f"  - {entry.get('value', 'N/A')}: "
                    f"{entry.get('contribution_percentage', 0):.1f}% contribution, "
                    f"Change: {entry.get('change_rate', 0) * 100:.1f}%"
                )

    # --- Decomposition section ---
    decomp = state.get('decomposition')
    if decomp:
        lines.append("\n## Metric Decomposition")
        lines.append(f"Formula: {decomp.get('formula', 'N/A')}")
        lines.append(f"Type: {decomp.get('decomposition_type', 'N/A')}")
        lines.append("Component Contributions:")
        changes = decomp.get('component_changes', {})
        for name, share in decomp.get('component_contributions', {}).items():
            lines.append(
                f"  - {name}: {share * 100:.1f}% contribution, "
                f"Change: {changes.get(name, 0):.2f}"
            )

    if not lines:
        return "No significant analysis results found."
    return "\n".join(lines)


def _generate_simple_attribution(state: Dict[str, Any]) -> Dict[str, Any]:
    """
    Build a heuristic attribution result without any LLM call.

    Args:
        state: State dict; reads 'drill_down_path' and 'decomposition'
            if present.

    Returns:
        Dict with 'root_causes', 'summary' and 'recommendations' keys,
        mirroring the shape of the LLM-produced attribution.
    """
    causes = []

    # Top contributor from the first drill-down level, if any.
    path = state.get('drill_down_path', [])
    if path and path[0].get('top_contributors'):
        level = path[0]
        leader = level['top_contributors'][0]
        # NOTE(review): impact uses the 'contribution' key while evidence uses
        # 'contribution_percentage' — presumably a fraction vs. a percent;
        # confirm contributor dicts carry both keys.
        severity = 'high' if abs(leader.get('contribution', 0)) > 0.3 else 'medium'
        causes.append({
            'cause': f"{level.get('dimension')}: {leader.get('value')}",
            'evidence': (
                f"Contributes {leader.get('contribution_percentage', 0):.1f}% "
                f"with {leader.get('change_rate', 0) * 100:.1f}% change"
            ),
            'impact': severity,
            'confidence': 0.7,
        })

    # Largest-magnitude component from the metric decomposition, if any.
    decomp = state.get('decomposition')
    shares = decomp.get('component_contributions', {}) if decomp else {}
    if shares:
        name, value = max(shares.items(), key=lambda kv: abs(kv[1]))
        causes.append({
            'cause': f"Component: {name}",
            'evidence': f"Contributes {value * 100:.1f}% to metric change",
            'impact': 'high' if abs(value) > 0.5 else 'medium',
            'confidence': 0.6,
        })

    return {
        'root_causes': causes,
        'summary': 'Analysis completed with basic attribution.',
        'recommendations': ['Review top contributing dimensions', 'Monitor anomalous patterns'],
    }
