"""
Metrics calculation utilities for document analysis.

Provides comprehensive metrics calculation for document quality assessment
including readability, completeness, consistency, and specificity metrics.
"""

import re
import math
from typing import Dict, List, Any, Optional, Tuple
from dataclasses import dataclass
import logging

# Try to import optional dependencies
try:
    import textstat
    TEXTSTAT_AVAILABLE = True
except ImportError:
    TEXTSTAT_AVAILABLE = False

try:
    import jieba
    import jieba.analyse
    JIEBA_AVAILABLE = True
except ImportError:
    JIEBA_AVAILABLE = False

logger = logging.getLogger(__name__)


@dataclass
class ReadabilityMetrics:
    """Readability assessment metrics.

    Filled in by the English (textstat-based), Chinese (heuristic), or
    basic fallback readability calculators in MetricsCalculator.
    """
    flesch_score: float = 0.0         # 0-100 reading-ease style score; higher is easier
    avg_sentence_length: float = 0.0  # average words per sentence
    avg_word_length: float = 0.0      # average characters/letters per word
    syllable_count: int = 0           # total syllables (English path only; 0 otherwise)
    complex_words: int = 0            # difficult-word count (English path only; 0 otherwise)
    readability_level: str = ""       # human-readable label derived from flesch_score


@dataclass
class ContentMetrics:
    """Content structure and completeness metrics.

    Produced by MetricsCalculator.calculate_content_metrics from the raw
    text and its Markdown structure.
    """
    total_words: int = 0       # token count (jieba tokens for Chinese, \w+ words otherwise)
    total_sentences: int = 0   # sentence count after punctuation-based splitting
    total_paragraphs: int = 0  # non-empty blocks separated by blank lines
    heading_count: int = 0     # Markdown headings (# .. ######)
    list_item_count: int = 0   # bulleted plus numbered list items
    code_block_count: int = 0  # fenced ``` code blocks
    link_count: int = 0        # Markdown [text](url) links
    technical_terms: int = 0   # matches of the technical-terms pattern
    unique_words: int = 0      # distinct lowercased alphabetic tokens


@dataclass
class QualityMetrics:
    """Document quality assessment metrics.

    All scores are on a 0-100 scale; overall_score is the weighted
    combination computed by MetricsCalculator.calculate_quality_metrics.
    """
    completeness_score: float = 0.0  # presence of content, structure, requirement keywords
    consistency_score: float = 0.0   # formatting uniformity / low ambiguity
    specificity_score: float = 0.0   # quantitative data and technical detail
    structure_score: float = 0.0     # headings, lists, hierarchy quality
    clarity_score: float = 0.0       # copied from the readability flesch_score
    overall_score: float = 0.0       # weighted sum, clamped to [0, 100]


class MetricsCalculator:
    """Advanced metrics calculator for document analysis."""

    def __init__(self, language: str = "zh", config: Dict[str, Any] = None):
        """Initialize metrics calculator.

        Args:
            language: Document language for metrics calculation
            config: Configuration for metric calculations
        """
        self.language = language
        self.config = config or {}

        # Scoring weights
        self.weights = self.config.get("weights", {
            "readability": 0.25,
            "completeness": 0.20,
            "consistency": 0.20,
            "specificity": 0.20,
            "structure": 0.15
        })

        # Thresholds for quality levels
        self.thresholds = self.config.get("thresholds", {
            "excellent": 85,
            "good": 70,
            "fair": 55,
            "poor": 0
        })

        # Initialize patterns
        self._initialize_patterns()

    def _initialize_patterns(self):
        """Initialize regex patterns for analysis."""
        # Technical terms patterns
        if self.language == "zh":
            self.technical_terms_pattern = re.compile(
                r'(API|UI|UX|HTTP|JSON|XML|SQL|CSS|HTML|JavaScript|Python|Java|C\+\+|算法|数据库|框架|接口|模块|系统)'
            )
            self.requirement_keywords_pattern = re.compile(
                r'(应该|必须|需要|要求|功能|需求|用户|系统|支持|提供|实现|处理|管理|显示|输入|输出)'
            )
            self.ambiguous_indicators_pattern = re.compile(
                r'(可能|大概|或许|应该|通常|一般|相关|适当|基本|主要|重要|必要)'
            )
            self.quantitative_indicators_pattern = re.compile(
                r'(\d+[\u4e00-\u9fff]*|\d+\.\d+|\d+%|\d+个|\d+次|\d+秒|\d+分钟|\d+小时|\d+天|\d+MB|\d+GB)'
            )
        else:
            self.technical_terms_pattern = re.compile(
                r'(API|UI|UX|HTTP|JSON|XML|SQL|CSS|HTML|JavaScript|Python|Java|C\+\+)'
            )
            self.requirement_keywords_pattern = re.compile(
                r'(should|must|need|require|function|requirement|user|system|support|provide|implement)'
            )
            self.ambiguous_indicators_pattern = re.compile(
                r'(might|could|possibly|probably|usually|generally|related|appropriate|basic|major)'
            )
            self.quantitative_indicators_pattern = re.compile(
                r'(\d+\.\d+|\d+%|\d+\s*(?:times?|seconds?|minutes?|hours?|days?|MB|GB))'
            )

        # Structure patterns
        self.heading_pattern = re.compile(r'^#{1,6}\s+(.+)$', re.MULTILINE)
        self.list_pattern = re.compile(r'^\s*[-*+]\s+(.+)$', re.MULTILINE)
        self.numbered_list_pattern = re.compile(r'^\s*\d+\.\s+(.+)$', re.MULTILINE)
        self.code_block_pattern = re.compile(r'```[\s\S]*?```')
        self.link_pattern = re.compile(r'\[([^\]]+)\]\(([^)]+)\)')

    def calculate_comprehensive_metrics(self, text: str, document_structure: Dict[str, Any] = None) -> Dict[str, Any]:
        """Calculate comprehensive document metrics.

        Args:
            text: Document text content
            document_structure: Additional structure information

        Returns:
            Comprehensive metrics dictionary
        """
        # Calculate basic content metrics
        content_metrics = self.calculate_content_metrics(text, document_structure)

        # Calculate readability metrics
        readability_metrics = self.calculate_readability_metrics(text)

        # Calculate quality metrics
        quality_metrics = self.calculate_quality_metrics(text, content_metrics, readability_metrics)

        # Combine all metrics
        comprehensive_metrics = {
            "content": content_metrics.__dict__,
            "readability": readability_metrics.__dict__,
            "quality": quality_metrics.__dict__,
            "language": self.language,
            "analysis_timestamp": self._get_timestamp()
        }

        return comprehensive_metrics

    def calculate_content_metrics(self, text: str, document_structure: Dict[str, Any] = None) -> ContentMetrics:
        """Calculate content structure metrics.

        Args:
            text: Document text content
            document_structure: Additional structure information

        Returns:
            Content metrics
        """
        # Basic text statistics
        if self.language == "zh" and JIEBA_AVAILABLE:
            words = list(jieba.cut(text))
        else:
            words = re.findall(r'\b\w+\b', text)

        sentences = self._split_sentences(text)
        paragraphs = [p for p in text.split('\n\n') if p.strip()]

        # Structure analysis
        headings = self.heading_pattern.findall(text)
        list_items = len(self.list_pattern.findall(text)) + len(self.numbered_list_pattern.findall(text))
        code_blocks = len(self.code_block_pattern.findall(text))
        links = len(self.link_pattern.findall(text))

        # Vocabulary analysis
        unique_words = len(set(word.lower() for word in words if word.isalpha()))
        technical_terms = len(self.technical_terms_pattern.findall(text))

        return ContentMetrics(
            total_words=len(words),
            total_sentences=len(sentences),
            total_paragraphs=len(paragraphs),
            heading_count=len(headings),
            list_item_count=list_items,
            code_block_count=code_blocks,
            link_count=links,
            technical_terms=technical_terms,
            unique_words=unique_words
        )

    def calculate_readability_metrics(self, text: str) -> ReadabilityMetrics:
        """Calculate readability metrics.

        Args:
            text: Document text content

        Returns:
            Readability metrics
        """
        if self.language == "en" and TEXTSTAT_AVAILABLE:
            return self._calculate_english_readability(text)
        else:
            return self._calculate_chinese_readability(text)

    def _calculate_english_readability(self, text: str) -> ReadabilityMetrics:
        """Calculate readability metrics for English text.

        Args:
            text: English text content

        Returns:
            Readability metrics
        """
        try:
            # Use textstat for English readability
            flesch_score = textstat.flesch_reading_ease(text)
            avg_sentence_length = textstat.avg_sentence_length(text)
            avg_word_length = textstat.avg_letter_per_word(text)
            syllable_count = textstat.syllable_count(text)
            complex_words = textstat.difficult_words(text)

            # Determine readability level
            if flesch_score >= 90:
                level = "Very Easy"
            elif flesch_score >= 80:
                level = "Easy"
            elif flesch_score >= 70:
                level = "Fairly Easy"
            elif flesch_score >= 60:
                level = "Standard"
            elif flesch_score >= 50:
                level = "Fairly difficult"
            elif flesch_score >= 30:
                level = "Difficult"
            else:
                level = "Very difficult"

            return ReadabilityMetrics(
                flesch_score=flesch_score,
                avg_sentence_length=avg_sentence_length,
                avg_word_length=avg_word_length,
                syllable_count=syllable_count,
                complex_words=complex_words,
                readability_level=level
            )
        except Exception as e:
            logger.warning(f"English readability calculation failed: {e}")
            return self._calculate_basic_readability(text)

    def _calculate_chinese_readability(self, text: str) -> ReadabilityMetrics:
        """Calculate readability metrics for Chinese text.

        Args:
            text: Chinese text content

        Returns:
            Readability metrics
        """
        try:
            # Tokenize Chinese text
            if JIEBA_AVAILABLE:
                words = list(jieba.cut(text))
            else:
                words = re.findall(r'[\u4e00-\u9fff]+', text)

            sentences = self._split_sentences(text)

            # Calculate basic metrics
            avg_sentence_length = len(words) / max(len(sentences), 1)
            avg_word_length = sum(len(word) for word in words) / max(len(words), 1)

            # Simplified readability score for Chinese
            # Based on sentence length and character complexity
            complexity_chars = len(re.findall(r'[^\u4e00-\u9fff\s\w\.,!?;:]', text))
            complexity_ratio = complexity_chars / max(len(text), 1)

            # Base score adjusted by complexity
            base_score = 100
            sentence_penalty = max(0, (avg_sentence_length - 25) * 1.5)
            complexity_penalty = complexity_ratio * 30
            flesch_score = max(0, base_score - sentence_penalty - complexity_penalty)

            # Determine readability level
            if flesch_score >= 80:
                level = "易读"
            elif flesch_score >= 60:
                level = "中等"
            elif flesch_score >= 40:
                level = "较难"
            else:
                level = "困难"

            return ReadabilityMetrics(
                flesch_score=flesch_score,
                avg_sentence_length=avg_sentence_length,
                avg_word_length=avg_word_length,
                syllable_count=0,  # Not applicable for Chinese
                complex_words=0,  # Not calculated for Chinese
                readability_level=level
            )
        except Exception as e:
            logger.warning(f"Chinese readability calculation failed: {e}")
            return self._calculate_basic_readability(text)

    def _calculate_basic_readability(self, text: str) -> ReadabilityMetrics:
        """Calculate basic readability metrics as fallback.

        Args:
            text: Text content

        Returns:
            Basic readability metrics
        """
        words = re.findall(r'\b\w+\b', text)
        sentences = self._split_sentences(text)

        avg_sentence_length = len(words) / max(len(sentences), 1)
        avg_word_length = sum(len(word) for word in words) / max(len(words), 1)

        # Simple scoring
        flesch_score = max(0, 100 - (avg_sentence_length - 15) * 2 - (avg_word_length - 5) * 3)

        return ReadabilityMetrics(
            flesch_score=flesch_score,
            avg_sentence_length=avg_sentence_length,
            avg_word_length=avg_word_length,
            syllable_count=0,
            complex_words=0,
            readability_level="Unknown"
        )

    def calculate_quality_metrics(
        self,
        text: str,
        content_metrics: ContentMetrics,
        readability_metrics: ReadabilityMetrics
    ) -> QualityMetrics:
        """Calculate overall quality metrics.

        Args:
            text: Document text content
            content_metrics: Content metrics
            readability_metrics: Readability metrics

        Returns:
            Quality metrics
        """
        # Calculate individual quality scores
        completeness_score = self._calculate_completeness_score(text, content_metrics)
        consistency_score = self._calculate_consistency_score(text)
        specificity_score = self._calculate_specificity_score(text)
        structure_score = self._calculate_structure_score(text, content_metrics)
        clarity_score = readability_metrics.flesch_score

        # Calculate overall weighted score
        overall_score = (
            completeness_score * self.weights["completeness"] +
            consistency_score * self.weights["consistency"] +
            specificity_score * self.weights["specificity"] +
            structure_score * self.weights["structure"] +
            clarity_score * self.weights["readability"]
        )

        return QualityMetrics(
            completeness_score=completeness_score,
            consistency_score=consistency_score,
            specificity_score=specificity_score,
            structure_score=structure_score,
            clarity_score=clarity_score,
            overall_score=min(100, max(0, overall_score))
        )

    def _calculate_completeness_score(self, text: str, content_metrics: ContentMetrics) -> float:
        """Calculate document completeness score.

        Args:
            text: Document text content
            content_metrics: Content metrics

        Returns:
            Completeness score (0-100)
        """
        score = 0.0

        # Base score for having content
        if content_metrics.total_words > 0:
            score += 20

        # Structure completeness
        if content_metrics.heading_count > 0:
            score += 15
        if content_metrics.list_item_count > 0:
            score += 10
        if content_metrics.link_count > 0:
            score += 5

        # Content richness
        if content_metrics.technical_terms > 0:
            score += 10
        if content_metrics.unique_words > 50:
            score += 10
        if content_metrics.total_paragraphs > 3:
            score += 10

        # Requirement indicators
        requirement_count = len(self.requirement_keywords_pattern.findall(text))
        if requirement_count > 0:
            score += min(20, requirement_count * 2)

        return min(100, score)

    def _calculate_consistency_score(self, text: str) -> float:
        """Calculate document consistency score.

        Args:
            text: Document text content

        Returns:
            Consistency score (0-100)
        """
        score = 100.0

        # Penalize inconsistent formatting
        heading_formats = re.findall(r'^#{1,6}\s+(.+)$', text, re.MULTILINE)
        if heading_formats:
            # Check for consistent heading capitalization
            capitalization_styles = set()
            for heading in heading_formats:
                if heading[0].isupper():
                    capitalization_styles.add("title_case")
                else:
                    capitalization_styles.add("sentence_case")

            if len(capitalization_styles) > 1:
                score -= 10

        # Check for consistent list formatting
        bullet_lists = re.findall(r'^\s*([-*+])\s+', text, re.MULTILINE)
        if bullet_lists:
            bullet_types = set(bullet_lists)
            if len(bullet_types) > 1:
                score -= 10

        # Penalize excessive ambiguous language
        ambiguous_count = len(self.ambiguous_indicators_pattern.findall(text))
        ambiguous_penalty = min(30, ambiguous_count * 2)
        score -= ambiguous_penalty

        return max(0, score)

    def _calculate_specificity_score(self, text: str) -> float:
        """Calculate document specificity score.

        Args:
            text: Document text content

        Returns:
            Specificity score (0-100)
        """
        score = 0.0

        # Reward quantitative information
        quantitative_count = len(self.quantitative_indicators_pattern.findall(text))
        score += min(40, quantitative_count * 5)

        # Reward specific technical terms
        technical_count = len(self.technical_terms_pattern.findall(text))
        score += min(30, technical_count * 3)

        # Penalize vague language
        vague_count = len(self.ambiguous_indicators_pattern.findall(text))
        vague_penalty = min(30, vague_count * 3)
        score = max(0, score - vague_penalty)

        # Reward detailed descriptions
        sentences = self._split_sentences(text)
        detailed_sentences = 0
        for sentence in sentences:
            if len(sentence.split()) > 15:  # Longer sentences tend to be more detailed
                detailed_sentences += 1

        if sentences:
            detail_ratio = detailed_sentences / len(sentences)
            score += min(30, detail_ratio * 30)

        return min(100, score)

    def _calculate_structure_score(self, text: str, content_metrics: ContentMetrics) -> float:
        """Calculate document structure score.

        Args:
            text: Document text content
            content_metrics: Content metrics

        Returns:
            Structure score (0-100)
        """
        score = 0.0

        # Base structure elements
        if content_metrics.heading_count > 0:
            score += 25
        if content_metrics.list_item_count > 0:
            score += 20
        if content_metrics.total_paragraphs > 1:
            score += 15

        # Hierarchical structure
        headings = self.heading_pattern.findall(text)
        if headings:
            heading_levels = []
            for heading in headings:
                level = len(re.match(r'^#+', heading).group())
                heading_levels.append(level)

            # Check for proper heading hierarchy
            if len(set(heading_levels)) > 1:  # Multiple levels
                score += 20

            # Check for logical progression (no skipped levels)
            for i in range(1, len(heading_levels)):
                if heading_levels[i] > heading_levels[i-1] + 1:
                    score -= 10
                    break

        # Code blocks and examples
        if content_metrics.code_block_count > 0:
            score += 10
        if content_metrics.link_count > 0:
            score += 10

        return min(100, max(0, score))

    def _split_sentences(self, text: str) -> List[str]:
        """Split text into sentences.

        Args:
            text: Text to split

        Returns:
            List of sentences
        """
        if self.language == "zh":
            # Chinese sentence splitting
            sentences = re.split(r'[。！？；]', text)
        else:
            # English sentence splitting
            sentences = re.split(r'[.!?;]', text)

        return [s.strip() for s in sentences if s.strip()]

    def _get_timestamp(self) -> str:
        """Get current timestamp.

        Returns:
            ISO format timestamp
        """
        from datetime import datetime
        return datetime.now().isoformat()

    def get_quality_level(self, score: float) -> str:
        """Get quality level based on score.

        Args:
            score: Quality score

        Returns:
            Quality level string
        """
        if score >= self.thresholds["excellent"]:
            return "excellent"
        elif score >= self.thresholds["good"]:
            return "good"
        elif score >= self.thresholds["fair"]:
            return "fair"
        else:
            return "poor"

    def generate_metrics_summary(self, metrics: Dict[str, Any]) -> Dict[str, Any]:
        """Generate human-readable metrics summary.

        Args:
            metrics: Comprehensive metrics dictionary

        Returns:
            Metrics summary
        """
        quality_metrics = metrics["quality"]
        content_metrics = metrics["content"]
        readability_metrics = metrics["readability"]

        return {
            "overall_assessment": {
                "score": quality_metrics["overall_score"],
                "level": self.get_quality_level(quality_metrics["overall_score"]),
                "grade": self._get_letter_grade(quality_metrics["overall_score"])
            },
            "content_summary": {
                "words": content_metrics["total_words"],
                "sentences": content_metrics["total_sentences"],
                "paragraphs": content_metrics["total_paragraphs"],
                "headings": content_metrics["heading_count"],
                "technical_terms": content_metrics["technical_terms"]
            },
            "readability_summary": {
                "score": readability_metrics["flesch_score"],
                "level": readability_metrics["readability_level"],
                "avg_sentence_length": readability_metrics["avg_sentence_length"]
            },
            "quality_breakdown": {
                "completeness": quality_metrics["completeness_score"],
                "consistency": quality_metrics["consistency_score"],
                "specificity": quality_metrics["specificity_score"],
                "structure": quality_metrics["structure_score"],
                "clarity": quality_metrics["clarity_score"]
            }
        }

    def _get_letter_grade(self, score: float) -> str:
        """Convert numeric score to letter grade.

        Args:
            score: Numeric score

        Returns:
            Letter grade
        """
        if score >= 90:
            return "A"
        elif score >= 80:
            return "B"
        elif score >= 70:
            return "C"
        elif score >= 60:
            return "D"
        else:
            return "F"