"""
Pydantic schemas for data validation and configuration.
"""
from pydantic import BaseModel, Field
from typing import Optional, List, Dict, Any, Literal
from datetime import datetime
from enum import Enum


class DecompositionType(str, Enum):
    """Types of metric decomposition.

    Subclasses ``str`` so members compare equal to their literal values
    and serialize as plain strings when used in the Pydantic models below.
    """
    ADDITIVE = "additive"              # components combine by addition
    MULTIPLICATIVE = "multiplicative"  # components combine by multiplication
    RATIO = "ratio"                    # metric is a quotient of components


class SeverityLevel(str, Enum):
    """Severity levels for anomalies, ordered here from least to most severe.

    Subclasses ``str`` so members serialize as their plain string values.
    """
    LOW = "low"
    MEDIUM = "medium"
    HIGH = "high"
    CRITICAL = "critical"


class MetricConfig(BaseModel):
    """Configuration for a metric.

    Describes how a single metric is identified, displayed, optionally
    decomposed into components, and aggregated.
    """
    name: str            # metric identifier (presumably unique; verify against callers)
    display_name: str    # human-readable label
    description: Optional[str] = None
    formula: Optional[str] = None  # textual formula for derived metrics, if any
    unit: Optional[str] = None     # free-form unit string (e.g. "%", "USD") -- TODO confirm format
    decomposition_type: Optional[DecompositionType] = None  # None => metric is not decomposed
    components: Optional[List[str]] = None  # component names used when decomposing -- assumed to reference other metrics; verify
    aggregation: Literal["sum", "avg", "max", "min", "count"] = "sum"  # closed set enforced by Literal


class DimensionConfig(BaseModel):
    """Configuration for a dimension.

    A dimension is an attribute metrics can be sliced by; dimensions may
    form a hierarchy via ``hierarchy_level`` / ``parent_dimension``.
    """
    name: str          # dimension identifier
    display_name: str  # human-readable label
    description: Optional[str] = None
    cardinality: Optional[int] = None  # number of distinct values, when known
    hierarchy_level: int = 0           # 0 presumably means top of the hierarchy -- TODO confirm
    parent_dimension: Optional[str] = None  # name of the parent dimension, if any


class AnalysisConfig(BaseModel):
    """Configuration for attribution analysis.

    Numeric bounds are enforced by Pydantic ``Field`` constraints
    (``ge``/``le``); out-of-range values raise a validation error.
    """
    max_drill_depth: int = Field(default=3, ge=1, le=10)  # max levels of dimensional drill-down
    min_contribution_threshold: float = Field(default=0.05, ge=0, le=1)  # fraction in [0, 1]
    anomaly_threshold: float = Field(default=2.0, ge=0)  # presumably a z-score-style cutoff -- TODO confirm units
    top_n_contributors: int = Field(default=5, ge=1, le=20)  # contributors reported per level
    enable_llm_reasoning: bool = True  # toggles LLM-generated explanations
    llm_model: str = "gpt-4"           # model identifier passed to the LLM client
    temperature: float = Field(default=0.1, ge=0, le=2)  # LLM sampling temperature


class AnomalyResult(BaseModel):
    """Result of anomaly detection.

    One detected anomaly for a metric at a point in time, with how far
    the observed value deviated from expectation and how confident the
    detector is.
    """
    metric: str             # name of the metric the anomaly was found on
    timestamp: datetime     # when the anomalous observation occurred
    actual_value: float     # observed value
    expected_value: float   # baseline/expected value it was compared against
    deviation: float        # difference between actual and expected -- sign convention not shown here; verify in detector
    deviation_percentage: float
    severity: SeverityLevel
    confidence: float = Field(ge=0, le=1)  # required; constrained to [0, 1]
    method: str             # identifier of the detection method used
    context: Optional[Dict[str, Any]] = None  # free-form extra detail from the detector


class DimensionContribution(BaseModel):
    """Contribution of a dimension value.

    How much a single slice (one value of one dimension) contributed to
    a metric's change.
    """
    dimension: str       # dimension name
    value: str           # the specific dimension value for this slice
    metric_value: float  # metric value for this slice
    baseline_value: Optional[float] = None  # comparison-period value, when available
    contribution: float              # absolute contribution -- units not shown here; verify producer
    contribution_percentage: float   # contribution as a percentage of the total
    change_rate: Optional[float] = None
    is_significant: bool  # required (no default); producer decides significance
    rank: int             # position among contributors -- presumably 1-based by contribution; verify


class DrillDownResult(BaseModel):
    """Result of a drill-down analysis.

    One level of a dimensional drill-down: which dimension was explored,
    under which parent filter, and the top contributing slices found.
    """
    level: int       # drill-down depth (bounded by AnalysisConfig.max_drill_depth -- TODO confirm)
    dimension: str   # dimension explored at this level
    parent_filter: Optional[Dict[str, str]] = None  # dimension->value constraints inherited from parent levels
    top_contributors: List[DimensionContribution]
    total_explained: float  # portion of change explained by the listed contributors -- fraction vs percent not shown; verify
    timestamp: datetime
    reasoning: Optional[str] = None  # optional narrative explanation (e.g. LLM-generated)


class MetricDecompositionResult(BaseModel):
    """Result of metric decomposition.

    Breaks a metric into named components and quantifies each
    component's value, change, and contribution.
    """
    metric: str    # metric that was decomposed
    formula: str   # formula used for the decomposition
    decomposition_type: DecompositionType
    components: Dict[str, float]              # component name -> current value
    component_changes: Dict[str, float]       # component name -> change vs baseline -- assumed; verify producer
    component_contributions: Dict[str, float] # component name -> contribution to the metric's change
    analysis: Optional[str] = None            # optional narrative analysis


class AttributionResult(BaseModel):
    """Final attribution analysis result.

    Aggregates everything produced for one query: detected anomalies,
    drill-downs, optional decomposition, ranked root causes, and a
    human-readable summary, plus run metadata.
    """
    query: str          # the original user/analysis query
    target_metric: str  # metric the analysis was run against
    # Builtin generic tuple[...] (PEP 585) requires Python 3.9+.
    time_range: tuple[datetime, datetime]  # (start, end) -- ordering assumed; verify producer

    # Analysis results
    anomalies: List[AnomalyResult]
    drill_downs: List[DrillDownResult]
    decomposition: Optional[MetricDecompositionResult] = None

    # Root causes
    root_causes: List[Dict[str, Any]]  # schema of each entry not defined here -- TODO document or type it

    # Summary
    summary: str
    confidence: float = Field(ge=0, le=1)  # required; overall confidence in [0, 1]

    # Metadata
    analysis_timestamp: datetime        # when the analysis ran
    execution_time_seconds: float       # wall-clock duration of the analysis

    # NOTE(review): `class Config` with `json_encoders` is the Pydantic v1
    # configuration style; it is deprecated in Pydantic v2 (use
    # `model_config` / field serializers instead). Confirm which pydantic
    # version this project pins before modernizing.
    class Config:
        json_encoders = {
            # Serialize datetimes as ISO-8601 strings in JSON output.
            datetime: lambda v: v.isoformat()
        }
