"""
Metrics calculation utilities for specification fixing.

Provides calculation and analysis of various metrics for
specification quality, completeness, and consistency.
"""

import re
from typing import Dict, List, Any, Optional, Tuple
from datetime import datetime
import json

from ..utils.logger import get_logger
from ..utils.exceptions import AnalysisError

logger = get_logger(__name__)


class MetricsCalculator:
    """Calculator for specification metrics."""

    def __init__(self):
        """Initialize weighting tables used by the metric calculations."""
        # Relative weight of each quality dimension (sums to 1.0);
        # consumed by calculate_overall_score.
        self.quality_weights = dict(
            readability=0.20,
            completeness=0.25,
            consistency=0.20,
            specificity=0.20,
            structure=0.15,
        )

        # Points awarded per completeness criterion (totals 100).
        self.completeness_criteria = dict(
            has_user_stories=20,
            has_requirements=25,
            has_acceptance_criteria=20,
            has_success_criteria=15,
            has_clear_scope=20,
        )

    def calculate_overall_score(self, metrics: Dict[str, Any]) -> Dict[str, Any]:
        """
        Calculate overall specification quality score.

        Args:
            metrics: Mapping of metric name -> result dict containing a
                numeric "score" (metrics absent from the input count as 0).

        Returns:
            Overall score with per-metric breakdown, letter grade and
            improvement recommendations.

        Raises:
            AnalysisError: If the metric data cannot be processed.
        """
        try:
            overall_score = 0.0
            breakdown = {}

            # Weighted sum over the configured quality dimensions.
            for metric, weight in self.quality_weights.items():
                score = metrics.get(metric, {}).get("score", 0)
                weighted_score = score * weight
                overall_score += weighted_score
                breakdown[metric] = {
                    "score": score,
                    "weight": weight,
                    "weighted_score": weighted_score
                }

            grade = self._get_grade(overall_score)

            result = {
                "overall_score": round(overall_score, 2),
                "grade": grade,
                "breakdown": breakdown,
                "recommendations": self._get_overall_recommendations(overall_score, metrics)
            }

            logger.info(f"Calculated overall score: {overall_score:.2f} ({grade})")
            return result

        except Exception as e:
            logger.error(f"Failed to calculate overall score: {e}")
            # Chain the original exception so the root cause survives in
            # tracebacks (the bare raise discarded it).
            raise AnalysisError("Failed to calculate overall score") from e

    def calculate_readability_metrics(self, parsed_content: Dict[str, Any]) -> Dict[str, Any]:
        """
        Calculate readability metrics.

        Args:
            parsed_content: Parsed specification content with "content",
                "statistics" and "sections" keys.

        Returns:
            Readability metrics: combined score plus complexity/structure
            sub-scores and basic text statistics.

        Raises:
            AnalysisError: If expected keys are missing from the input.
        """
        try:
            content = parsed_content["content"]
            stats = parsed_content["statistics"]

            total_words = stats["total_words"]
            # Count only non-empty segments so a trailing period does not
            # inflate the sentence count (the original counted the empty
            # string after the final "." as an extra sentence).
            total_sentences = len([s for s in content.split('.') if s.strip()])
            avg_sentence_length = total_words / max(1, total_sentences)

            # Complexity (0-100): shorter average sentences score higher.
            complexity_score = self._calculate_complexity_score(avg_sentence_length)

            # Structure (0-100): quality of the heading hierarchy.
            structure_score = self._calculate_structure_score(parsed_content["sections"])

            result = {
                "score": round((complexity_score + structure_score) / 2, 2),
                "complexity_score": complexity_score,
                "structure_score": structure_score,
                "total_words": total_words,
                "total_sentences": total_sentences,
                "avg_sentence_length": round(avg_sentence_length, 2),
                "estimated_reading_time": stats["estimated_reading_time_minutes"],
                "metrics": {
                    "tables": stats["tables"],
                    "code_blocks": stats["code_blocks"],
                    "links": stats["links"]
                }
            }

            return result

        except Exception as e:
            logger.error(f"Failed to calculate readability metrics: {e}")
            # Chain the original exception so the root cause is preserved.
            raise AnalysisError("Failed to calculate readability metrics") from e

    def calculate_completeness_metrics(self, parsed_content: Dict[str, Any]) -> Dict[str, Any]:
        """
        Calculate specification completeness metrics.

        Args:
            parsed_content: Parsed specification content with
                "requirements", "criteria" and "sections" lists.

        Returns:
            Completeness metrics: overall score plus section, requirement,
            criteria and quantification sub-scores with detail counts.

        Raises:
            AnalysisError: If expected keys are missing from the input.
        """
        try:
            requirements = parsed_content["requirements"]
            criteria = parsed_content["criteria"]
            sections = parsed_content["sections"]

            # Coverage of the sections every spec is expected to have.
            required_sections = ["User Scenarios", "Requirements", "Success Criteria"]
            found_sections = [s["title"] for s in sections if s["title"] in required_sections]
            section_score = (len(found_sections) / len(required_sections)) * 100

            # 5 points per requirement, capped at 100.
            requirement_score = min(100, len(requirements) * 5)

            # 10 points per criterion, capped at 100.
            criteria_score = min(100, len(criteria) * 10)

            # Share of criteria that carry a quantified target.
            quantified_criteria = [c for c in criteria if c.get("is_quantified", False)]
            quantification_score = (len(quantified_criteria) / max(1, len(criteria))) * 100

            # Overall: unweighted mean of the four sub-scores.
            completeness_score = (section_score + requirement_score + criteria_score + quantification_score) / 4

            result = {
                "score": round(completeness_score, 2),
                "section_score": round(section_score, 2),
                "requirement_score": round(requirement_score, 2),
                "criteria_score": round(criteria_score, 2),
                "quantification_score": round(quantification_score, 2),
                "details": {
                    "total_requirements": len(requirements),
                    "total_criteria": len(criteria),
                    "quantified_criteria": len(quantified_criteria),
                    "found_sections": found_sections,
                    "missing_sections": [s for s in required_sections if s not in found_sections]
                }
            }

            return result

        except Exception as e:
            logger.error(f"Failed to calculate completeness metrics: {e}")
            # Chain the original exception so the root cause is preserved.
            raise AnalysisError("Failed to calculate completeness metrics") from e

    def calculate_consistency_metrics(self, parsed_content: Dict[str, Any]) -> Dict[str, Any]:
        """
        Calculate consistency metrics.

        Args:
            parsed_content: Parsed specification content with "content",
                "requirements" and "components" keys.

        Returns:
            Consistency metrics: overall score, per-check sub-scores and
            the concrete issues found by each check.

        Raises:
            AnalysisError: If expected keys are missing from the input.
        """
        try:
            content = parsed_content["content"]
            requirements = parsed_content["requirements"]
            components = parsed_content["components"]

            # Each check yields a list of issues; scores deduct a fixed
            # penalty per issue, floored at 0.
            terminology_issues = self._check_terminology_consistency(content)
            terminology_score = max(0, 100 - len(terminology_issues) * 10)

            id_issues = self._check_requirement_id_consistency(requirements)
            id_score = max(0, 100 - len(id_issues) * 15)

            reference_issues = self._check_component_reference_consistency(components, requirements)
            reference_score = max(0, 100 - len(reference_issues) * 12)

            # Overall: unweighted mean of the three sub-scores.
            consistency_score = (terminology_score + id_score + reference_score) / 3

            result = {
                "score": round(consistency_score, 2),
                "terminology_score": round(terminology_score, 2),
                "id_score": round(id_score, 2),
                "reference_score": round(reference_score, 2),
                "issues": {
                    "terminology": terminology_issues,
                    "requirement_ids": id_issues,
                    "component_references": reference_issues
                },
                "total_issues": len(terminology_issues) + len(id_issues) + len(reference_issues)
            }

            return result

        except Exception as e:
            logger.error(f"Failed to calculate consistency metrics: {e}")
            # Chain the original exception so the root cause is preserved.
            raise AnalysisError("Failed to calculate consistency metrics") from e

    def calculate_specificity_metrics(self, parsed_content: Dict[str, Any]) -> Dict[str, Any]:
        """
        Calculate specificity metrics (how specific and measurable the requirements are).

        Args:
            parsed_content: Parsed specification content with
                "requirements" and "criteria" lists.

        Returns:
            Specificity metrics: overall score plus requirement-specificity,
            criteria-measurability and SMART sub-scores with detail counts.

        Raises:
            AnalysisError: If expected keys are missing from the input.
        """
        try:
            requirements = parsed_content["requirements"]
            criteria = parsed_content["criteria"]

            # Share of requirements that read as specific and actionable.
            specific_requirements = sum(
                1 for req in requirements if self._is_requirement_specific(req["text"])
            )
            requirement_specificity = (specific_requirements / max(1, len(requirements))) * 100

            # Share of criteria flagged as quantified by the parser.
            measurable_criteria = sum(1 for c in criteria if c.get("is_quantified", False))
            criteria_measurability = (measurable_criteria / max(1, len(criteria))) * 100

            # Share of criteria passing the SMART heuristic.
            smart_criteria = sum(1 for crit in criteria if self._is_smart_criteria(crit))
            smart_score = (smart_criteria / max(1, len(criteria))) * 100

            # Overall: unweighted mean of the three sub-scores.
            specificity_score = (requirement_specificity + criteria_measurability + smart_score) / 3

            result = {
                "score": round(specificity_score, 2),
                "requirement_specificity": round(requirement_specificity, 2),
                "criteria_measurability": round(criteria_measurability, 2),
                "smart_criteria_score": round(smart_score, 2),
                "details": {
                    "total_requirements": len(requirements),
                    "specific_requirements": specific_requirements,
                    "total_criteria": len(criteria),
                    "measurable_criteria": measurable_criteria,
                    "smart_criteria": smart_criteria
                }
            }

            return result

        except Exception as e:
            logger.error(f"Failed to calculate specificity metrics: {e}")
            # Chain the original exception so the root cause is preserved.
            raise AnalysisError("Failed to calculate specificity metrics") from e

    def _calculate_complexity_score(self, avg_sentence_length: float) -> float:
        """Score text complexity from average sentence length (higher is simpler)."""
        # Ascending thresholds map longer sentences to lower readability.
        for limit, score in ((15, 90), (20, 75), (25, 60)):
            if avg_sentence_length <= limit:
                return score
        return 40  # very long sentences: hard to read

    def _calculate_structure_score(self, sections: List[Dict[str, Any]]) -> float:
        """Score heading structure; penalizes flat documents and bad hierarchy."""
        if not sections:
            return 0

        levels = [s["level"] for s in sections]
        deepest = max(levels)
        # Every heading level must fall within [1, deepest] for a proper hierarchy.
        hierarchy_ok = all(1 <= lvl <= deepest for lvl in levels)

        # Too many top-level (level 1) sections makes the document flat.
        top_level = sum(1 for lvl in levels if lvl == 1)
        if top_level > 5:
            score = 60
        elif top_level > 3:
            score = 80
        else:
            score = 100

        if not hierarchy_ok:
            score -= 20

        return max(0, score)

    def _get_grade(self, score: float) -> str:
        """Map a 0-100 score to a letter grade (A-F)."""
        # Descending cutoffs; anything below 60 is an F.
        for cutoff, letter in ((90, "A"), (80, "B"), (70, "C"), (60, "D")):
            if score >= cutoff:
                return letter
        return "F"

    def _get_overall_recommendations(self, score: float, metrics: Dict[str, Any]) -> List[str]:
        """Build improvement recommendations from the overall and per-metric scores."""
        recommendations: List[str] = []

        if score < 70:
            recommendations.append("Overall specification quality needs significant improvement")

        # Per-metric advice: (metric key, score threshold, message).
        advice = (
            ("readability", 70, "Improve text readability with shorter sentences and simpler language"),
            ("completeness", 80, "Add more complete requirements and acceptance criteria"),
            ("consistency", 80, "Fix terminology inconsistencies and requirement ID formatting"),
            ("specificity", 80, "Make requirements more specific and measurable"),
        )
        for key, threshold, message in advice:
            # A missing metric defaults to a perfect score and triggers nothing.
            if metrics.get(key, {}).get("score", 100) < threshold:
                recommendations.append(message)

        if not recommendations:
            recommendations.append("Specification quality is good")

        return recommendations

    def _check_terminology_consistency(self, content: str) -> List[Dict[str, Any]]:
        """Flag concepts referred to by more than one synonym in the text."""
        # Known synonym groups, keyed by the English concept name.
        term_variations = {
            "requirement": ["需求", "要求", "规格", "功能需求"],
            "component": ["组件", "模块", "部件", "构件"],
            "interface": ["接口", "界面", "交互接口"],
            "validation": ["验证", "校验", "检查", "确认"],
            "clarification": ["澄清", "说明", "解释", "阐述"],
        }

        haystack = content.lower()
        issues: List[Dict[str, Any]] = []

        for standard_term, variations in term_variations.items():
            present = [term for term in variations if term in haystack]
            # One variant alone is fine; mixing two or more is inconsistent.
            if len(present) > 1:
                issues.append({
                    "type": "terminology",
                    "standard_term": standard_term,
                    "found_variations": present,
                    "recommendation": f"Use consistent term: {present[0]}"
                })

        return issues

    def _check_requirement_id_consistency(self, requirements: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Check requirement IDs for duplicates and gaps in FR-N numbering.

        Args:
            requirements: Parsed requirements, each with an "id" key.

        Returns:
            Issue dicts of type "duplicate_id" and/or "id_gap".
        """
        issues: List[Dict[str, Any]] = []

        # Count occurrences of each ID to find duplicates.
        id_counts: Dict[str, int] = {}
        for req in requirements:
            req_id = req["id"]
            id_counts[req_id] = id_counts.get(req_id, 0) + 1

        for req_id, count in id_counts.items():
            if count > 1:
                issues.append({
                    "type": "duplicate_id",
                    "id": req_id,
                    "count": count,
                    "recommendation": f"Make requirement ID {req_id} unique"
                })

        # Collect FR-N numbers, running the regex once per ID (the original
        # called re.search twice per ID: once in the comprehension condition
        # and again to extract the group).
        numbers = []
        for req in requirements:
            req_id = req["id"]
            if not req_id.startswith("FR-"):
                continue
            match = re.search(r'FR-(\d+)', req_id)
            if match:
                numbers.append(int(match.group(1)))

        # Flag gaps in the 1..max(numbers) sequence (e.g. FR-1, FR-3 -> missing 2).
        if numbers:
            missing_numbers = set(range(1, max(numbers) + 1)) - set(numbers)
            if missing_numbers:
                issues.append({
                    "type": "id_gap",
                    "missing_numbers": sorted(missing_numbers),
                    "recommendation": "Fill gaps in requirement numbering"
                })

        return issues

    def _check_component_reference_consistency(self, components: List[Dict[str, Any]],
                                              requirements: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Check for component reference consistency.

        Args:
            components: Defined components, each with a "name" key.
            requirements: Parsed requirements, each with "id" and "text" keys.

        Returns:
            A list of "undefined_component" issue dicts. As written this is
            always empty — see the NOTE below.
        """
        issues = []

        # Extract component names from requirements
        component_names = set(comp["name"] for comp in components)

        # Check if components referenced in requirements are defined
        for req in requirements:
            req_text = req["text"].lower()
            for comp_name in component_names:
                if comp_name.lower() in req_text:
                    # Component is referenced, check if it's properly defined
                    # NOTE(review): comp_name is drawn from `components` itself, so this
                    # lookup always succeeds and the branch below is unreachable — the
                    # method can only ever return []. The intended cross-check most
                    # likely needs candidate names extracted from the requirement text
                    # rather than from `components`; confirm the intended source of
                    # referenced names with the parser before changing this.
                    defined_components = [c for c in components if c["name"] == comp_name]
                    if not defined_components:
                        issues.append({
                            "type": "undefined_component",
                            "component_name": comp_name,
                            "requirement_id": req["id"],
                            "recommendation": f"Define component {comp_name} or remove reference"
                        })

        return issues

    def _is_requirement_specific(self, requirement_text: str) -> bool:
        """Return True if a requirement reads as specific and actionable.

        A requirement counts as specific when it uses a mandating keyword,
        avoids vague qualifiers, and is either measurable (contains a number
        or a time/size unit) or long enough to carry real detail.

        Args:
            requirement_text: The raw requirement sentence.

        Returns:
            bool: Always an actual bool (the original could leak a truthy
            re.Match object, violating the declared return type).
        """
        specific_indicators = [
            "必须", "应该", "需要", "支持", "提供", "实现", "包含",
            "must", "should", "shall", "will", "support", "provide", "implement"
        ]

        vague_indicators = [
            "适当", "合理", "充分", "有效", "优化", "改善",
            "appropriate", "reasonable", "sufficient", "effective", "optimize", "improve"
        ]

        text_lower = requirement_text.lower()

        # Mandating keyword present?
        has_specific = any(indicator in text_lower for indicator in specific_indicators)

        # Vague qualifier present?
        has_vague = any(indicator in text_lower for indicator in vague_indicators)

        # Measurable: contains a number, or a time/size unit. bool() fixes
        # the bug where a re.Match object propagated into the return value.
        has_measurable = bool(
            re.search(r'\d+', requirement_text) or
            any(word in text_lower for word in ["秒", "分钟", "小时", "%", "mb", "gb"])
        )

        return has_specific and not has_vague and (has_measurable or len(requirement_text) > 20)

    def _is_smart_criteria(self, criterion: Dict[str, Any]) -> bool:
        """Return True if a success criterion meets the SMART checklist.

        Checks Specific, Measurable, Achievable, Relevant, Time-bound. The
        original computed the time-bound flag but dropped it from the final
        conjunction; it is now included so the "T" in SMART actually counts.

        Args:
            criterion: Parsed criterion with "text" and optional "is_quantified".

        Returns:
            True when every SMART aspect is satisfied.
        """
        criterion_text = criterion["text"].lower()

        # Specific - has a clear numeric target
        is_specific = bool(re.search(r'\d+', criterion_text))

        # Measurable - flagged as quantified by the parser
        is_measurable = criterion.get("is_quantified", False)

        # Achievable - assumed; a real assessment needs domain knowledge
        is_achievable = True

        # Relevant - basic length heuristic as a proxy for substance
        is_relevant = len(criterion_text) > 10

        # Time-bound - mentions a time unit (second/minute/hour/day/week/month)
        is_time_bound = any(word in criterion_text for word in ["秒", "分钟", "小时", "天", "周", "月"])

        # Bug fix: is_time_bound was computed but omitted from the result.
        return is_specific and is_measurable and is_achievable and is_relevant and is_time_bound