"""
Text analysis utilities for specification fixing.

Provides text processing, analysis, and extraction capabilities
for specification documents.
"""

import re
from typing import Dict, List, Any, Optional, Tuple
from pathlib import Path
import markdown

from ..utils.logger import get_logger
from ..utils.exceptions import ParseError

logger = get_logger(__name__)


class TextAnalyzer:
    """Text analyzer for specification documents.

    Extracts headed sections, functional requirements, component
    references and acceptance criteria from markdown specification text,
    and provides terminology-consistency and readability analysis.
    Patterns cover both English (``FR-n:`` / ``SC-n:``) and Chinese
    specification phrasing.
    """

    def __init__(self):
        """Initialize the regex pattern tables used by the extractors.

        Patterns are kept as plain strings (the ``re`` module caches
        compiled patterns internally), so external code inspecting these
        attributes continues to see strings.
        """
        # English "FR-n: ..." plus Chinese "system must / user must be
        # able to / system should / user should be able to" forms.
        self.requirement_patterns = [
            r"FR-\d+:\s*(.+)",
            r"系统必须(.+)",
            r"用户必须能够(.+)",
            r"系统应该(.+)",
            r"用户应该能够(.+)",
        ]

        # "name_component", "name组件", "component: name", "组件: name".
        self.component_patterns = [
            r"([a-zA-Z_]+)_component",
            r"([a-zA-Z_]+)组件",
            r"component:\s*([a-zA-Z_]+)",
            r"组件:\s*([a-zA-Z_]+)",
        ]

        # "SC-n: ..." plus quantified Chinese metrics (success rate,
        # response time, accuracy, completion time).
        self.criteria_patterns = [
            r"SC-\d+:\s*(.+)",
            r"成功率.*?(\d+%)",
            r"响应时间.*?(\d+秒)",
            r"准确率.*?(\d+%)",
            r"完成时间.*?(\d+秒)",
        ]

    def parse_markdown(self, file_path: str) -> Dict[str, Any]:
        """
        Parse markdown specification file.

        Args:
            file_path: Path to markdown file

        Returns:
            Parsed content dictionary containing the raw text, rendered
            HTML, extracted sections/requirements/components/criteria
            and document statistics. The original text is stored under
            the ``"raw_content"`` key.

        Raises:
            ParseError: If the file is missing or parsing fails.
        """
        try:
            file_path_obj = Path(file_path)
            if not file_path_obj.exists():
                raise FileNotFoundError(f"File not found: {file_path}")

            content = file_path_obj.read_text(encoding='utf-8')

            # Render to HTML as well; downstream consumers may prefer
            # the rendered form for display or text extraction.
            html_content = markdown.markdown(content, extensions=['tables', 'fenced_code'])

            result = {
                "file_path": str(file_path_obj),
                "raw_content": content,
                "html_content": html_content,
                "sections": self._extract_sections(content),
                "requirements": self._extract_requirements(content),
                "components": self._extract_components(content),
                "criteria": self._extract_criteria(content),
                "statistics": self._calculate_statistics(content),
            }

            logger.info(f"Successfully parsed {file_path}")
            return result

        except Exception as e:
            # Wrap every failure (including the FileNotFoundError above)
            # in the project's ParseError so callers catch one type.
            logger.error(f"Failed to parse {file_path}: {e}")
            raise ParseError(f"Failed to parse specification: {e}", file_path=str(file_path))

    def _extract_sections(self, content: str) -> List[Dict[str, Any]]:
        """Extract headed sections from markdown content.

        Returns one dict per ``#``-style header with its level, title,
        1-based line number and the body text up to the next header.
        Text appearing before the first header is not captured.
        """
        sections = []
        current_section = None
        current_content = []

        for line_num, line in enumerate(content.split('\n'), 1):
            header_match = re.match(r'^(#{1,6})\s+(.+)', line)
            if header_match:
                # Close out the section in progress before starting a new one.
                if current_section:
                    current_section["content"] = '\n'.join(current_content)
                    sections.append(current_section)

                current_section = {
                    "level": len(header_match.group(1)),
                    "title": header_match.group(2).strip(),
                    "line_number": line_num,
                    "content": ""
                }
                current_content = []
            elif current_section:
                current_content.append(line)

        # Flush the final section (the loop only flushes on the NEXT header).
        if current_section:
            current_section["content"] = '\n'.join(current_content)
            sections.append(current_section)

        return sections

    def _extract_requirements(self, content: str) -> List[Dict[str, Any]]:
        """Extract functional requirements from content.

        Each line is recorded at most once: the first matching pattern
        wins, so e.g. "FR-1: 系统必须..." no longer produces duplicate
        entries (it previously matched both the FR and the Chinese
        pattern and was counted twice).
        """
        requirements = []

        for line_num, line in enumerate(content.split('\n'), 1):
            line = line.strip()

            for pattern in self.requirement_patterns:
                match = re.search(pattern, line, re.IGNORECASE)
                if match:
                    requirement_text = match.group(1).strip()

                    # Prefer the explicit FR-n ID; otherwise number sequentially.
                    id_match = re.search(r'(FR-\d+)', line)
                    req_id = id_match.group(1) if id_match else f"REQ-{len(requirements) + 1}"

                    requirements.append({
                        "id": req_id,
                        "text": requirement_text,
                        "line_number": line_num,
                        "raw_line": line,
                        "type": self._classify_requirement(requirement_text)
                    })
                    break  # one requirement per line; avoid double-counting

        return requirements

    def _extract_components(self, content: str) -> List[Dict[str, Any]]:
        """Extract component references from content.

        All patterns are applied to every line (a single line may name
        several components); the matching line is kept as context.
        """
        components = []

        for line_num, line in enumerate(content.split('\n'), 1):
            line = line.strip()

            for pattern in self.component_patterns:
                for raw_name in re.findall(pattern, line, re.IGNORECASE):
                    component_name = raw_name.strip()
                    components.append({
                        "name": component_name,
                        "line_number": line_num,
                        "context": line,
                        "type": self._classify_component(component_name, line)
                    })

        return components

    def _extract_criteria(self, content: str) -> List[Dict[str, Any]]:
        """Extract acceptance criteria from content.

        The first matching pattern wins per line, so a quantified
        "SC-n:" criterion is not recorded twice.
        """
        criteria = []

        for line_num, line in enumerate(content.split('\n'), 1):
            line = line.strip()

            for pattern in self.criteria_patterns:
                match = re.search(pattern, line, re.IGNORECASE)
                if match:
                    # Prefer the explicit SC-n ID; otherwise number sequentially.
                    id_match = re.search(r'(SC-\d+)', line)
                    crit_id = id_match.group(1) if id_match else f"CRIT-{len(criteria) + 1}"

                    # Pull out "<number><unit>" pairs (%, seconds, minutes,
                    # hours, MB, GB) so callers can check quantification.
                    numeric_values = re.findall(r'(\d+(?:\.\d+)?)\s*(%|秒|分钟|小时|MB|GB)', line)

                    criteria.append({
                        "id": crit_id,
                        "text": match.group(1).strip(),
                        "line_number": line_num,
                        "raw_line": line,
                        "numeric_values": numeric_values,
                        "is_quantified": bool(numeric_values)
                    })
                    break  # one criterion per line; avoid double-counting

        return criteria

    def _classify_requirement(self, text: str) -> str:
        """Classify a requirement by keyword into analysis / UI / data /
        security / performance buckets, defaulting to "functional".

        The keyword lists are Chinese domain terms: analyze/assess/check/
        verify; user/UI/interaction/operation; data/storage/database/file;
        security/permission/authentication/authorization; performance/
        speed/response/load.
        """
        text_lower = text.lower()

        if any(word in text_lower for word in ["分析", "评估", "检查", "验证"]):
            return "analysis"
        elif any(word in text_lower for word in ["用户", "界面", "交互", "操作"]):
            return "user_interface"
        elif any(word in text_lower for word in ["数据", "存储", "数据库", "文件"]):
            return "data"
        elif any(word in text_lower for word in ["安全", "权限", "认证", "授权"]):
            return "security"
        elif any(word in text_lower for word in ["性能", "速度", "响应", "负载"]):
            return "performance"
        else:
            return "functional"

    def _classify_component(self, component_name: str, context: str) -> str:
        """Classify a component by keywords in its name.

        ``context`` is accepted for interface compatibility but the
        classification currently uses only the component name.
        """
        name_lower = component_name.lower()

        if any(word in name_lower for word in ["service", "服务"]):
            return "service"
        elif any(word in name_lower for word in ["model", "模型", "entity"]):
            return "model"
        elif any(word in name_lower for word in ["controller", "control", "控制器"]):
            return "controller"
        elif any(word in name_lower for word in ["util", "tool", "工具"]):
            return "utility"
        elif any(word in name_lower for word in ["core", "核心"]):
            return "core"
        else:
            return "component"

    def _calculate_statistics(self, content: str) -> Dict[str, Any]:
        """Calculate document statistics.

        Note: "tables" counts pipe-delimited table *rows* (one per line
        containing ``|...|``), not distinct tables, and "code_blocks"
        pairs up ``` fences.
        """
        lines = content.split('\n')
        words = content.split()

        code_blocks = len(re.findall(r'```', content)) // 2  # fences come in pairs
        tables = len(re.findall(r'\|.*\|', content))
        links = len(re.findall(r'\[.*\]\(.*\)', content))

        return {
            "total_lines": len(lines),
            "total_words": len(words),
            "total_characters": len(content),
            "code_blocks": code_blocks,
            "tables": tables,
            "links": links,
            # Rough estimate at ~200 words per minute, minimum one minute.
            "estimated_reading_time_minutes": max(1, len(words) // 200)
        }

    def find_inconsistencies(self, parsed_content: Dict[str, Any]) -> List[Dict[str, Any]]:
        """Find terminology inconsistencies in the document.

        Args:
            parsed_content: Result dictionary from :meth:`parse_markdown`.

        Returns:
            One record per term that appears in more than one variation.
        """
        inconsistencies = []

        # parse_markdown stores the original text under "raw_content";
        # the old "content" key is kept as a fallback for older callers.
        content = parsed_content.get("raw_content") or parsed_content.get("content", "")
        terms = self._extract_terminology(content)

        # A term with two or more observed variations is inconsistent.
        for term, variations in terms.items():
            if len(variations) > 1:
                inconsistencies.append({
                    "type": "terminology",
                    "term": term,
                    "variations": variations,
                    "suggestion": f"Use consistent term: {variations[0]}"
                })

        return inconsistencies

    def _extract_terminology(self, content: str) -> Dict[str, List[str]]:
        """Extract specification terms and the variations found in *content*.

        Maps a canonical English term to the list of Chinese variations
        actually present in the document; terms with no hits are omitted.
        """
        term_patterns = {
            "requirement": ["需求", "要求", "规格"],
            "component": ["组件", "模块", "部件"],
            "interface": ["接口", "界面", "交互"],
            "validation": ["验证", "校验", "检查"],
            "clarification": ["澄清", "说明", "解释"],
            "specification": ["规范", "规格书", "说明书"],
        }

        found_terms = {}
        content_lower = content.lower()

        for standard_term, variations in term_patterns.items():
            found_variations = [v for v in variations if v in content_lower]
            if found_variations:
                found_terms[standard_term] = found_variations

        return found_terms

    def analyze_readability(self, content: str) -> Dict[str, Any]:
        """Analyze text readability metrics.

        Uses the optional ``textstat`` package for Flesch / Gunning-Fog
        scores when available, otherwise falls back to
        :meth:`_basic_readability_analysis`.
        """
        try:
            import textstat  # optional dependency; guarded below

            sentences = content.split('.')
            words = content.split()

            avg_sentence_length = len(words) / max(1, len(sentences))
            avg_word_length = sum(len(word) for word in words) / max(1, len(words))

            flesch_score = textstat.flesch_reading_ease(content)
            fog_index = textstat.gunning_fog(content)

            # Map the Flesch score onto the conventional difficulty bands.
            if flesch_score >= 90:
                readability_level = "Very Easy"
            elif flesch_score >= 80:
                readability_level = "Easy"
            elif flesch_score >= 70:
                readability_level = "Fairly Easy"
            elif flesch_score >= 60:
                readability_level = "Standard"
            elif flesch_score >= 50:
                readability_level = "Fairly Difficult"
            elif flesch_score >= 30:
                readability_level = "Difficult"
            else:
                readability_level = "Very Difficult"

            return {
                "flesch_reading_ease": flesch_score,
                "gunning_fog_index": fog_index,
                "avg_sentence_length": avg_sentence_length,
                "avg_word_length": avg_word_length,
                "readability_level": readability_level,
                "recommendations": self._get_readability_recommendations(flesch_score, fog_index)
            }

        except ImportError:
            logger.warning("textstat not available, using basic readability analysis")
            return self._basic_readability_analysis(content)

    def _basic_readability_analysis(self, content: str) -> Dict[str, Any]:
        """Basic readability analysis without textstat.

        Scores complexity purely from average sentence length
        (>25 words High, >15 Medium, else Low).
        """
        sentences = content.split('.')
        words = content.split()

        avg_sentence_length = len(words) / max(1, len(sentences))
        avg_word_length = sum(len(word) for word in words) / max(1, len(words))

        if avg_sentence_length > 25:
            complexity = "High"
        elif avg_sentence_length > 15:
            complexity = "Medium"
        else:
            complexity = "Low"

        return {
            "avg_sentence_length": avg_sentence_length,
            "avg_word_length": avg_word_length,
            "complexity": complexity,
            "recommendations": self._get_basic_recommendations(avg_sentence_length, avg_word_length)
        }

    def _get_readability_recommendations(self, flesch_score: float, fog_index: float) -> List[str]:
        """Return improvement tips for the textstat-based metrics.

        Flesch below 60 ("Standard") and fog above 12 (roughly high-school
        reading level) trigger suggestions; otherwise a single OK message.
        """
        recommendations = []

        if flesch_score < 60:
            recommendations.append("Consider using shorter sentences")
            recommendations.append("Use simpler words where possible")

        if fog_index > 12:
            recommendations.append("Reduce complex words")
            recommendations.append("Break down long sentences")

        if not recommendations:
            recommendations.append("Readability is good")

        return recommendations

    def _get_basic_recommendations(self, avg_sentence_length: float, avg_word_length: float) -> List[str]:
        """Return improvement tips for the fallback metrics."""
        recommendations = []

        if avg_sentence_length > 20:
            recommendations.append("Consider using shorter sentences")

        if avg_word_length > 6:
            recommendations.append("Use simpler words where possible")

        if not recommendations:
            recommendations.append("Text complexity seems reasonable")

        return recommendations