"""
Analysis result models for document analysis system.

Contains comprehensive analysis outcomes including clarity scores,
metrics, and relationships to clarifications and suggestions.
"""

import json
from dataclasses import dataclass, field
from datetime import datetime
from typing import Dict, List, Optional, Any


@dataclass
class ClarityScore:
    """Clarity assessment scores across multiple dimensions.

    All scores are on a 0-100 scale and are validated at construction
    time (see ``_validate_scores``).
    """

    overall: float = 0.0
    readability: float = 0.0
    completeness: float = 0.0
    consistency: float = 0.0
    specificity: float = 0.0
    structure: float = 0.0

    def __post_init__(self):
        """Validate score ranges after initialization.

        Raises:
            ValueError: If any score is non-numeric or outside [0, 100].
        """
        self._validate_scores()

    def _validate_scores(self):
        """Ensure all scores are numeric and within the valid range (0-100).

        Raises:
            ValueError: If a score is not an int/float, or lies outside
                the inclusive range [0, 100].
        """
        for score_name, score_value in self.__dict__.items():
            if not isinstance(score_value, (int, float)):
                raise ValueError(f"Score {score_name} must be numeric, got {type(score_value)}")
            if not 0 <= score_value <= 100:
                raise ValueError(f"Score {score_name} must be between 0 and 100, got {score_value}")

    def to_dict(self) -> Dict[str, float]:
        """Convert clarity score to dictionary.

        Returns:
            Dictionary mapping each dimension name to its score.
        """
        return {
            'overall': self.overall,
            'readability': self.readability,
            'completeness': self.completeness,
            'consistency': self.consistency,
            'specificity': self.specificity,
            'structure': self.structure
        }

    @classmethod
    def from_dict(cls, data: Dict[str, float]) -> 'ClarityScore':
        """Create clarity score from dictionary.

        Args:
            data: Dictionary with score values; missing keys default to 0.0.

        Returns:
            ClarityScore instance

        Raises:
            ValueError: If any provided score fails validation.
            TypeError: If ``data`` contains keys that are not score fields.
        """
        return cls(**data)

    def get_level(self) -> str:
        """Get clarity level based on overall score.

        Returns:
            Clarity level: "excellent" (>= 85), "good" (>= 70),
            "fair" (>= 55), otherwise "poor".
        """
        if self.overall >= 85:
            return "excellent"
        elif self.overall >= 70:
            return "good"
        elif self.overall >= 55:
            return "fair"
        else:
            return "poor"

    def get_weighted_average(self, weights: Optional[Dict[str, float]] = None) -> float:
        """Calculate weighted average of the five component scores.

        Args:
            weights: Optional custom weights, merged over the defaults
                (readability 0.25, completeness/consistency/specificity
                0.20 each, structure 0.15). NOTE(review): merged weights
                are not re-normalized, so custom weights that do not sum
                to 1.0 yield a weighted sum rather than a true average.

        Returns:
            Weighted sum of component scores, rounded to 2 decimals.
        """
        default_weights = {
            'readability': 0.25,
            'completeness': 0.20,
            'consistency': 0.20,
            'specificity': 0.20,
            'structure': 0.15
        }

        if weights:
            default_weights.update(weights)

        weighted_sum = sum(
            getattr(self, component) * weight
            for component, weight in default_weights.items()
        )

        return round(weighted_sum, 2)


@dataclass
class AnalysisMetrics:
    """Quantitative analysis metrics for document content."""

    total_sentences: int = 0
    avg_sentence_length: float = 0.0
    avg_word_length: float = 0.0
    unique_words_count: int = 0
    technical_terms_count: int = 0
    ambiguous_phrases_count: int = 0
    missing_sections_count: int = 0
    readability_score: float = 0.0
    complexity_score: float = 0.0

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the metrics to a plain dictionary.

        Returns:
            Dictionary keyed by field name, in declaration order.
        """
        names = (
            'total_sentences',
            'avg_sentence_length',
            'avg_word_length',
            'unique_words_count',
            'technical_terms_count',
            'ambiguous_phrases_count',
            'missing_sections_count',
            'readability_score',
            'complexity_score',
        )
        return {name: getattr(self, name) for name in names}

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'AnalysisMetrics':
        """Build an AnalysisMetrics instance from a dictionary.

        Args:
            data: Dictionary of field values; missing fields use defaults.

        Returns:
            AnalysisMetrics instance
        """
        return cls(**data)


@dataclass
class AnalysisResult:
    """Comprehensive analysis outcomes for a single document.

    Combines the dimensional ``ClarityScore``, quantitative
    ``AnalysisMetrics``, content counts, and derived quality indicators.
    ``clarity_level`` and the percentage fields are computed in
    ``__post_init__`` when not supplied explicitly.
    """

    # Core attributes
    id: str
    document_id: str
    analysis_date: datetime

    # Scores and metrics
    clarity_score: ClarityScore
    metrics: AnalysisMetrics

    # Content analysis counts
    functional_requirements_count: int = 0
    user_stories_count: int = 0
    acceptance_criteria_count: int = 0

    # Quality indicators (derived in __post_init__ when left at defaults)
    clarity_level: str = ""
    completeness_percentage: float = 0.0
    ambiguity_percentage: float = 0.0

    # Issues and suggestions
    major_issues: List[str] = field(default_factory=list)
    improvement_areas: List[str] = field(default_factory=list)

    # Analysis metadata
    analysis_version: str = "1.0"
    processing_time_seconds: float = 0.0
    analyzer_notes: str = ""

    # Relationships (populated from other entities; hidden from repr)
    _clarification_items: List['ClarificationItem'] = field(default_factory=list, repr=False)
    _suggestions: List['ImprovementSuggestion'] = field(default_factory=list, repr=False)

    def __post_init__(self):
        """Calculate derived attributes after initialization."""
        # Derive the textual level from the overall score unless the
        # caller supplied one explicitly.
        if not self.clarity_level:
            self.clarity_level = self.clarity_score.get_level()

        # Calculate percentages based on metrics
        self._calculate_percentages()

    def _calculate_percentages(self):
        """Calculate completeness and ambiguity percentages from metrics."""
        if self.metrics.total_sentences > 0:
            # Ambiguity: ambiguous phrases plus missing sections relative
            # to the sentence count, capped at 100%.
            total_content_indicators = (
                self.metrics.ambiguous_phrases_count +
                self.metrics.missing_sections_count
            )
            self.ambiguity_percentage = min(
                (total_content_indicators / max(self.metrics.total_sentences, 1)) * 100,
                100.0
            )

        # Completeness mirrors the clarity score's completeness component.
        self.completeness_percentage = self.clarity_score.completeness

    def _score_components(self) -> Dict[str, float]:
        """Map display names of the five component scores to their values.

        Shared by ``get_strengths`` and ``get_weaknesses``.
        """
        return {
            "Readability": self.clarity_score.readability,
            "Completeness": self.clarity_score.completeness,
            "Consistency": self.clarity_score.consistency,
            "Specificity": self.clarity_score.specificity,
            "Structure": self.clarity_score.structure
        }

    def get_summary(self) -> Dict[str, Any]:
        """Get a compact analysis summary.

        Returns:
            Summary dictionary of scores, counts, and metadata
            (``analysis_date`` serialized as an ISO-8601 string).
        """
        return {
            "document_id": self.document_id,
            "analysis_date": self.analysis_date.isoformat(),
            "overall_score": self.clarity_score.overall,
            "clarity_level": self.clarity_level,
            "completeness_percentage": self.completeness_percentage,
            "ambiguity_percentage": self.ambiguity_percentage,
            "functional_requirements": self.functional_requirements_count,
            "user_stories": self.user_stories_count,
            "acceptance_criteria": self.acceptance_criteria_count,
            "major_issues_count": len(self.major_issues),
            "improvement_areas_count": len(self.improvement_areas),
            "processing_time_seconds": self.processing_time_seconds
        }

    def get_strengths(self) -> List[str]:
        """Get document strengths based on analysis.

        Returns:
            List of strength descriptions: component scores >= 70 and
            content counts above fixed thresholds.
        """
        strengths = []

        # High-scoring component areas
        for component, score in self._score_components().items():
            if score >= 80:
                strengths.append(f"Excellent {component.lower()}")
            elif score >= 70:
                strengths.append(f"Good {component.lower()}")

        # Content strengths
        if self.functional_requirements_count >= 10:
            strengths.append("Comprehensive functional requirements")

        if self.user_stories_count >= 3:
            strengths.append("Well-defined user stories")

        if self.acceptance_criteria_count >= 5:
            strengths.append("Detailed acceptance criteria")

        return strengths

    def get_weaknesses(self) -> List[str]:
        """Get document weaknesses based on analysis.

        Returns:
            List of weakness descriptions: component scores < 70,
            content counts below fixed thresholds, and high ambiguity.
        """
        weaknesses = []

        # Low-scoring component areas
        for component, score in self._score_components().items():
            if score < 55:
                weaknesses.append(f"Poor {component.lower()}")
            elif score < 70:
                weaknesses.append(f"Could improve {component.lower()}")

        # Content weaknesses
        if self.functional_requirements_count < 3:
            weaknesses.append("Limited functional requirements")

        if self.user_stories_count < 2:
            weaknesses.append("Insufficient user stories")

        if self.acceptance_criteria_count < 3:
            weaknesses.append("Missing acceptance criteria")

        # High ambiguity
        if self.ambiguity_percentage > 30:
            weaknesses.append("High ambiguity in descriptions")

        return weaknesses

    def get_priority_recommendations(self, max_count: int = 5) -> List[str]:
        """Get priority recommendations for improvement.

        Args:
            max_count: Maximum number of recommendations returned.

        Returns:
            List of recommendations, lowest-scoring components first,
            followed by content-based suggestions, truncated to
            ``max_count``.
        """
        recommendations = []

        # Listed in a deliberate order distinct from _score_components():
        # sorted() is stable, so equal scores surface in this priority.
        score_components = {
            "Specificity": self.clarity_score.specificity,
            "Completeness": self.clarity_score.completeness,
            "Structure": self.clarity_score.structure,
            "Readability": self.clarity_score.readability,
            "Consistency": self.clarity_score.consistency
        }

        sorted_components = sorted(score_components.items(), key=lambda x: x[1])

        for component, score in sorted_components[:max_count]:
            if score < 70:
                recommendations.append(f"Improve {component.lower()} (current: {score:.1f}/100)")

        # Add content-based recommendations
        if self.functional_requirements_count < 5:
            recommendations.append("Add more functional requirements")

        if self.user_stories_count < 2:
            recommendations.append("Define user stories for better user-centric documentation")

        if self.acceptance_criteria_count < 3:
            recommendations.append("Add acceptance criteria for each requirement")

        return recommendations[:max_count]

    def to_dict(self) -> Dict[str, Any]:
        """Convert analysis result to a JSON-serializable dictionary.

        Returns:
            Dictionary representation; nested objects are serialized via
            their own ``to_dict`` and ``analysis_date`` via ISO-8601.
            Private relationship fields are excluded.
        """
        return {
            "id": self.id,
            "document_id": self.document_id,
            "analysis_date": self.analysis_date.isoformat(),
            "clarity_score": self.clarity_score.to_dict(),
            "metrics": self.metrics.to_dict(),
            "functional_requirements_count": self.functional_requirements_count,
            "user_stories_count": self.user_stories_count,
            "acceptance_criteria_count": self.acceptance_criteria_count,
            "clarity_level": self.clarity_level,
            "completeness_percentage": self.completeness_percentage,
            "ambiguity_percentage": self.ambiguity_percentage,
            "major_issues": self.major_issues,
            "improvement_areas": self.improvement_areas,
            "analysis_version": self.analysis_version,
            "processing_time_seconds": self.processing_time_seconds,
            "analyzer_notes": self.analyzer_notes
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'AnalysisResult':
        """Create analysis result from dictionary.

        The input dictionary is not modified.

        Args:
            data: Dictionary data, as produced by ``to_dict``.

        Returns:
            AnalysisResult instance
        """
        # Work on a shallow copy so the caller's dict is left untouched.
        data = dict(data)

        # Convert ISO date string to datetime (already-converted values pass through)
        if isinstance(data.get("analysis_date"), str):
            data["analysis_date"] = datetime.fromisoformat(data["analysis_date"])

        # Convert nested dictionaries into their model objects
        if isinstance(data.get("clarity_score"), dict):
            data["clarity_score"] = ClarityScore.from_dict(data["clarity_score"])

        if isinstance(data.get("metrics"), dict):
            data["metrics"] = AnalysisMetrics.from_dict(data["metrics"])

        return cls(**data)

    def __str__(self) -> str:
        """Short human-readable representation."""
        return (f"AnalysisResult(id={self.id}, document={self.document_id}, "
                f"score={self.clarity_score.overall:.1f}, level={self.clarity_level})")


# Import here to avoid circular imports: AnalysisResult references these
# classes only via string forward references in its field annotations
# (see _clarification_items / _suggestions), so the names are needed at
# module level only for runtime type resolution.
try:
    from .clarification import ClarificationItem
    from .suggestion import ImprovementSuggestion
except ImportError:
    # Fallback for type hints when the sibling modules are unavailable
    # (e.g. this module imported standalone); annotations stay unresolved.
    ClarificationItem = None
    ImprovementSuggestion = None