"""
语言流畅度评估器
评估句子复杂度、语法正确性和表达流畅性
"""

import re
import statistics
from typing import Dict, List, Tuple, Optional, Any
from dataclasses import dataclass
from enum import Enum
import logging
from collections import Counter

logger = logging.getLogger(__name__)


class FluencyIssueType(Enum):
    """Categories of fluency problems a text can exhibit."""
    SENTENCE_TOO_LONG = "sentence_too_long"          # sentence length above the max threshold
    SENTENCE_TOO_SHORT = "sentence_too_short"        # sentence length below the min threshold
    REPETITIVE_STRUCTURE = "repetitive_structure"    # several sentences share the same opening
    EXCESSIVE_PUNCTUATION = "excessive_punctuation"  # punctuation density too high
    UNCLEAR_EXPRESSION = "unclear_expression"        # used for empty / unanalyzable input
    GRAMMAR_ERROR = "grammar_error"                  # a heuristic grammar pattern matched
    WORD_REPETITION = "word_repetition"              # vocabulary diversity too low

@dataclass
class SentenceAnalysis:
    """Analysis result for a single sentence."""
    text: str                # sentence text (terminator removed by splitting)
    length: int              # character count of the sentence
    word_count: int          # CJK character count + English word count
    complexity_score: float  # structural complexity, clamped to [0.0, 1.0]
    has_grammar_issue: bool  # True if any heuristic grammar/length/punctuation check fired
    issue_types: List[FluencyIssueType]  # the specific checks that fired

@dataclass
class FluencyMetrics:
    """Document-level fluency metrics."""
    avg_sentence_length: float       # mean sentence length in characters
    sentence_length_variance: float  # variance of sentence lengths (0 when < 2 sentences)
    vocabulary_diversity: float      # unique tokens / total tokens over the whole text
    punctuation_density: float       # punctuation marks per character
    readability_score: float         # composite readability score in [0, 1]
    perplexity_estimate: float       # char-bigram repetition heuristic; lower is better

@dataclass
class FluencyIssue:
    """A single detected fluency problem."""
    sentence_index: int           # index into the sentence list; -1 for document-level issues
    issue_type: FluencyIssueType  # category of the problem
    description: str              # human-readable description (Chinese)
    severity: str                 # high, medium, low
    suggestion: str               # how to address the problem

@dataclass
class FluencyAnalysisResult:
    """Complete result of one fluency evaluation."""
    overall_score: float                       # aggregate score clamped to [0, 1]
    metrics: FluencyMetrics                    # document-level metrics
    sentence_analyses: List[SentenceAnalysis]  # one entry per sentence
    issues: List[FluencyIssue]                 # detected problems
    suggestions: List[Dict[str, Any]]          # metric/issue-driven improvement suggestions
    rewrite_suggestions: List[Dict[str, Any]]  # concrete per-sentence rewrites

class FluencyEvaluator:
    """Linguistic fluency evaluator for (mostly Chinese) text.

    Scores a text for sentence complexity, heuristic grammatical
    correctness and expressive fluency, and produces issue reports,
    improvement suggestions and concrete sentence rewrites.
    """

    def __init__(self):
        """Initialize length thresholds, heuristic patterns and word tables."""
        # Sentence-length standards (in Chinese characters).
        self.ideal_sentence_length = 20  # ideal sentence length (chars)
        self.max_sentence_length = 50    # maximum sentence length
        self.min_sentence_length = 8      # minimum sentence length

        # Common grammar-error patterns: (regex, user-facing description).
        self.grammar_patterns = [
            (r'的的', '重复使用"的"'),
            (r'了了', '重复使用"了"'),
            (r'[，。！？；][，。！？；]', '标点符号重复'),
            (r'[^，。！？；：""''（）【】《》]{60,}', '句子过长缺少标点'),
            (r'虽然[^但]{0,50}$', '虽然...但是结构不完整'),
            (r'^但是[^虽然]', '但是前面缺少虽然'),
            (r'不仅[^而且也还]{0,50}$', '不仅...而且结构不完整'),
            (r'因为[^所以]{0,50}$', '因为...所以结构不完整'),
            (r'是是', '重复使用"是"'),
            (r'[一二三四五六七八九十]是', '数字后直接接"是"'),
        ]

        # Sentence-opener templates grouped by rhetorical function.
        # NOTE(review): not referenced inside this class — presumably consumed
        # by callers; confirm before removing.
        self.sentence_starters = {
            '陈述': ['', '事实上', '实际上', '显然'],
            '转折': ['但是', '然而', '不过', '可是'],
            '因果': ['因为', '由于', '因此', '所以'],
            '并列': ['同时', '另外', '此外', '而且'],
            '条件': ['如果', '假如', '只要', '除非'],
            '强调': ['特别是', '尤其是', '重要的是', '值得注意的是']
        }

        # High-frequency words (intended for repetition detection).
        # NOTE(review): not referenced inside this class — confirm callers.
        self.common_words = {
            '的', '是', '在', '了', '和', '有', '我', '你', '他', '她',
            '这', '那', '个', '们', '来', '去', '到', '说', '要', '就'
        }

        # Regex markers for complex sentence constructions.
        self.complex_markers = [
            '不仅.*而且', '虽然.*但是', '因为.*所以', '如果.*那么',
            '一方面.*另一方面', '首先.*其次.*最后', '既.*又.*还'
        ]

    def evaluate(self, content: str) -> FluencyAnalysisResult:
        """
        Evaluate the fluency of a text.

        Args:
            content: Text to evaluate.

        Returns:
            A FluencyAnalysisResult with the overall score, metrics,
            per-sentence analyses, detected issues and suggestions.
        """
        if not content or not content.strip():
            return self._empty_result("内容为空")

        # Split into sentences.
        sentences = self._split_sentences(content)
        if not sentences:
            return self._empty_result("无法分句")

        # Analyze each sentence individually.
        sentence_analyses = self._analyze_sentences(sentences)

        # Compute document-level fluency metrics.
        metrics = self._calculate_metrics(content, sentences, sentence_analyses)

        # Detect fluency problems.
        issues = self._detect_fluency_issues(sentences, sentence_analyses, metrics)

        # Compute the overall score.
        overall_score = self._calculate_overall_score(metrics, issues)

        # Generate improvement suggestions.
        suggestions = self._generate_suggestions(metrics, issues)

        # Generate concrete sentence rewrites.
        rewrite_suggestions = self._generate_rewrite_suggestions(
            sentences, sentence_analyses, issues
        )

        return FluencyAnalysisResult(
            overall_score=overall_score,
            metrics=metrics,
            sentence_analyses=sentence_analyses,
            issues=issues,
            suggestions=suggestions,
            rewrite_suggestions=rewrite_suggestions
        )

    def _split_sentences(self, content: str) -> List[str]:
        """Split text into sentences on Chinese terminal punctuation."""
        # Full stop, question mark, exclamation mark and semicolon end a sentence.
        sentence_endings = r'[。！？；]'

        # Split; the terminators themselves are discarded.
        sentences = re.split(sentence_endings, content)

        # Drop empty fragments.
        sentences = [s.strip() for s in sentences if s.strip()]

        return sentences

    def _analyze_sentences(self, sentences: List[str]) -> List[SentenceAnalysis]:
        """Build a SentenceAnalysis for each sentence."""
        analyses = []

        for sentence in sentences:
            # Sentence length in characters.
            length = len(sentence)

            # Word count for mixed Chinese/English text:
            # each CJK char counts as one word, plus whole English words.
            chinese_chars = len(re.findall(r'[\u4e00-\u9fff]', sentence))
            english_words = len(re.findall(r'\b[a-zA-Z]+\b', sentence))
            word_count = chinese_chars + english_words

            # Structural complexity in [0, 1].
            complexity_score = self._calculate_sentence_complexity(sentence)

            # Heuristic grammar / length / punctuation checks.
            has_grammar_issue, issue_types = self._check_grammar_issues(sentence)

            analyses.append(SentenceAnalysis(
                text=sentence,
                length=length,
                word_count=word_count,
                complexity_score=complexity_score,
                has_grammar_issue=has_grammar_issue,
                issue_types=issue_types
            ))

        return analyses

    def _calculate_sentence_complexity(self, sentence: str) -> float:
        """Score a sentence's structural complexity, clamped to [0, 1]."""
        score = 0.0

        # Length factor: reward proximity to the ideal length.
        length = len(sentence)
        if self.min_sentence_length <= length <= self.max_sentence_length:
            # Within the acceptable range.
            length_score = 1.0 - abs(length - self.ideal_sentence_length) / self.ideal_sentence_length
            score += max(0, length_score) * 0.3
        else:
            # Too long or too short.
            score -= 0.2

        # Clauses and complex constructions add a small bonus each.
        for pattern in self.complex_markers:
            if re.search(pattern, sentence):
                score += 0.1

        # Punctuation usage: a few commas are good, too many are not.
        comma_count = sentence.count('，')
        if 1 <= comma_count <= 3:
            score += 0.2
        elif comma_count > 5:
            score -= 0.1

        # Vocabulary diversity (simplified: unique CJK chars / total CJK chars).
        unique_chars = len(set(re.findall(r'[\u4e00-\u9fff]', sentence)))
        total_chars = len(re.findall(r'[\u4e00-\u9fff]', sentence))
        if total_chars > 0:
            diversity = unique_chars / total_chars
            score += diversity * 0.2

        return min(1.0, max(0.0, score))

    def _check_grammar_issues(self, sentence: str) -> Tuple[bool, List[FluencyIssueType]]:
        """Run heuristic grammar, length and punctuation checks on a sentence.

        Returns:
            (has_issue, issue_types) — issue_types lists every check that fired;
            GRAMMAR_ERROR is appended at most once.
        """
        has_issue = False
        issue_types = []

        # Grammar-error patterns (stop at the first match).
        for pattern, _ in self.grammar_patterns:
            if re.search(pattern, sentence):
                has_issue = True
                issue_types.append(FluencyIssueType.GRAMMAR_ERROR)
                break

        # Sentence-length bounds.
        if len(sentence) > self.max_sentence_length:
            has_issue = True
            issue_types.append(FluencyIssueType.SENTENCE_TOO_LONG)
        elif len(sentence) < self.min_sentence_length:
            has_issue = True
            issue_types.append(FluencyIssueType.SENTENCE_TOO_SHORT)

        # Punctuation density: more than one mark per 10 characters is excessive.
        punctuation_count = len(re.findall(r'[，。！？；：、]', sentence))
        if punctuation_count > len(sentence) / 10:
            has_issue = True
            issue_types.append(FluencyIssueType.EXCESSIVE_PUNCTUATION)

        return has_issue, issue_types

    def _calculate_metrics(self, content: str, sentences: List[str],
                          analyses: List[SentenceAnalysis]) -> FluencyMetrics:
        """Compute document-level fluency metrics."""
        # Mean sentence length in characters.
        sentence_lengths = [len(s) for s in sentences]
        avg_sentence_length = statistics.mean(sentence_lengths) if sentence_lengths else 0

        # Variance of sentence lengths (needs at least two samples).
        sentence_length_variance = statistics.variance(sentence_lengths) if len(sentence_lengths) > 1 else 0

        # Vocabulary diversity: unique tokens over total tokens
        # (CJK runs and English words, case-folded).
        all_words = re.findall(r'[\u4e00-\u9fff]+|\b[a-zA-Z]+\b', content.lower())
        unique_words = set(all_words)
        vocabulary_diversity = len(unique_words) / len(all_words) if all_words else 0

        # Punctuation marks per character of the whole text.
        punctuation_count = len(re.findall(r'[，。！？；：、]', content))
        punctuation_density = punctuation_count / len(content) if content else 0

        # Composite readability score.
        readability_score = self._calculate_readability(
            avg_sentence_length,
            vocabulary_diversity,
            analyses
        )

        # Simplified perplexity estimate.
        perplexity_estimate = self._estimate_perplexity(content)

        return FluencyMetrics(
            avg_sentence_length=avg_sentence_length,
            sentence_length_variance=sentence_length_variance,
            vocabulary_diversity=vocabulary_diversity,
            punctuation_density=punctuation_density,
            readability_score=readability_score,
            perplexity_estimate=perplexity_estimate
        )

    def _calculate_readability(self, avg_sentence_length: float,
                              vocabulary_diversity: float,
                              analyses: List[SentenceAnalysis]) -> float:
        """Combine length, diversity and complexity into a [0, 1] readability score."""
        score = 0.0

        # Average sentence length in the comfortable band scores higher.
        if 15 <= avg_sentence_length <= 30:
            score += 0.3
        else:
            score += 0.1

        # Moderate vocabulary diversity scores higher.
        if 0.4 <= vocabulary_diversity <= 0.7:
            score += 0.3
        else:
            score += 0.1

        # Average per-sentence complexity contributes the remaining weight.
        avg_complexity = statistics.mean(
            [a.complexity_score for a in analyses]
        ) if analyses else 0
        score += avg_complexity * 0.4

        return min(1.0, score)

    def _estimate_perplexity(self, content: str) -> float:
        """
        Estimate perplexity (simplified).

        A character-bigram repetition heuristic: the higher the bigram
        repetition rate, the higher the returned value. Lower is better;
        range is [1.0, 11.0).
        """
        # All overlapping character bigrams.
        bigrams = [content[i:i + 2] for i in range(len(content) - 1)]

        if not bigrams:
            return 1.0

        # Fraction of bigrams that are repeats.
        unique_bigrams = set(bigrams)
        repetition_rate = 1 - (len(unique_bigrams) / len(bigrams))

        # Map repetition rate onto the pseudo-perplexity scale.
        perplexity = 1.0 + repetition_rate * 10

        return perplexity

    def _detect_fluency_issues(self, sentences: List[str],
                              analyses: List[SentenceAnalysis],
                              metrics: FluencyMetrics) -> List[FluencyIssue]:
        """Collect per-sentence and document-level fluency issues."""
        issues = []

        # Per-sentence issues.
        for i, (sentence, analysis) in enumerate(zip(sentences, analyses)):
            # Sentence too long.
            if FluencyIssueType.SENTENCE_TOO_LONG in analysis.issue_types:
                issues.append(FluencyIssue(
                    sentence_index=i,
                    issue_type=FluencyIssueType.SENTENCE_TOO_LONG,
                    description=f"句子过长（{analysis.length}字），影响阅读流畅性",
                    severity="medium",
                    suggestion="建议拆分为多个短句，或使用标点符号分隔"
                ))

            # Sentence too short.
            if FluencyIssueType.SENTENCE_TOO_SHORT in analysis.issue_types:
                issues.append(FluencyIssue(
                    sentence_index=i,
                    issue_type=FluencyIssueType.SENTENCE_TOO_SHORT,
                    description=f"句子过短（{analysis.length}字），表达不够充分",
                    severity="low",
                    suggestion="建议扩充内容或与相邻句子合并"
                ))

            # Grammar errors: report the first matching pattern's description.
            if analysis.has_grammar_issue:
                for pattern, error_desc in self.grammar_patterns:
                    if re.search(pattern, sentence):
                        issues.append(FluencyIssue(
                            sentence_index=i,
                            issue_type=FluencyIssueType.GRAMMAR_ERROR,
                            description=error_desc,
                            severity="high",
                            suggestion="请检查并修正语法错误"
                        ))
                        break

        # Repetitive sentence openings (first 5 chars, or the whole short sentence).
        sentence_starts = [s[:5] if len(s) >= 5 else s for s in sentences]
        start_counter = Counter(sentence_starts)

        for start, count in start_counter.items():
            if count >= 3 and len(start) >= 2:
                issues.append(FluencyIssue(
                    sentence_index=-1,  # document-level issue
                    issue_type=FluencyIssueType.REPETITIVE_STRUCTURE,
                    description=f'多个句子以"{start}"开头，句式单调',
                    severity="medium",
                    suggestion="建议使用不同的句式开头，增加文章的节奏感"
                ))

        # Low vocabulary diversity.
        if metrics.vocabulary_diversity < 0.3:
            issues.append(FluencyIssue(
                sentence_index=-1,
                issue_type=FluencyIssueType.WORD_REPETITION,
                description="词汇重复率过高，表达单调",
                severity="medium",
                suggestion="建议使用同义词或不同表达方式"
            ))

        return issues

    def _calculate_overall_score(self, metrics: FluencyMetrics,
                                issues: List[FluencyIssue]) -> float:
        """Aggregate metrics and issue penalties into a [0, 1] score."""
        base_score = 0.0

        # Readability contribution.
        base_score += metrics.readability_score * 0.4

        # Vocabulary-diversity contribution (capped).
        diversity_score = min(1.0, metrics.vocabulary_diversity * 2)
        base_score += diversity_score * 0.2

        # Reasonable average sentence length.
        if 15 <= metrics.avg_sentence_length <= 35:
            base_score += 0.2
        else:
            base_score += 0.05

        # Perplexity penalty (higher estimate lowers the score, capped at 0.2).
        perplexity_penalty = min(0.2, metrics.perplexity_estimate / 50)
        base_score -= perplexity_penalty

        # Issue penalties by severity.
        high_issues = sum(1 for i in issues if i.severity == "high")
        medium_issues = sum(1 for i in issues if i.severity == "medium")

        penalty = high_issues * 0.1 + medium_issues * 0.05

        return max(0.0, min(1.0, base_score - penalty))

    def _generate_suggestions(self, metrics: FluencyMetrics,
                            issues: List[FluencyIssue]) -> List[Dict[str, Any]]:
        """Generate metric- and issue-driven improvement suggestions."""
        suggestions = []

        # Metric-based: sentences running too long on average.
        if metrics.avg_sentence_length > 40:
            suggestions.append({
                "type": "fluency",
                "priority": "high",
                "description": "缩短句子长度",
                "detail": f"平均句长{metrics.avg_sentence_length:.1f}字，建议控制在20-30字",
                "tips": [
                    "将长句拆分为多个短句",
                    "删除冗余的修饰语",
                    "使用标点符号适当断句"
                ],
                "impact": 0.2
            })

        # Metric-based: low vocabulary diversity.
        if metrics.vocabulary_diversity < 0.3:
            suggestions.append({
                "type": "fluency",
                "priority": "medium",
                "description": "丰富词汇表达",
                "detail": "词汇重复率较高，建议使用多样化的表达",
                "tips": [
                    "使用同义词替换高频词",
                    "采用不同的表达方式",
                    "适当使用成语或专业术语"
                ],
                "impact": 0.15
            })

        # Metric-based: very uneven sentence lengths.
        if metrics.sentence_length_variance > 500:
            suggestions.append({
                "type": "fluency",
                "priority": "low",
                "description": "平衡句子长度",
                "detail": "句子长度差异过大，影响阅读节奏",
                "tips": [
                    "避免极长和极短句子的交替",
                    "保持句子长度的相对一致性",
                    "通过句子长度变化创造节奏感"
                ],
                "impact": 0.1
            })

        # Issue-based: grammar errors (lists up to 3 locations).
        grammar_issues = [i for i in issues if i.issue_type == FluencyIssueType.GRAMMAR_ERROR]
        if grammar_issues:
            suggestions.append({
                "type": "fluency",
                "priority": "high",
                "description": "修正语法错误",
                "detail": f"发现{len(grammar_issues)}处语法问题需要修正",
                "locations": [f"第{i.sentence_index + 1}句" for i in grammar_issues[:3]],
                "impact": 0.25
            })

        return suggestions

    def _generate_rewrite_suggestions(self, sentences: List[str],
                                     analyses: List[SentenceAnalysis],
                                     issues: List[FluencyIssue]) -> List[Dict[str, Any]]:
        """Generate concrete rewrite suggestions for flagged sentences."""
        rewrite_suggestions = []

        for issue in issues:
            if issue.sentence_index < 0:
                continue  # skip document-level issues

            sentence = sentences[issue.sentence_index]

            if issue.issue_type == FluencyIssueType.SENTENCE_TOO_LONG:
                # Suggest splitting the sentence.
                rewrite = self._suggest_sentence_split(sentence)
                if rewrite:
                    rewrite_suggestions.append({
                        "sentence_index": issue.sentence_index,
                        "original": sentence,
                        "suggested": rewrite,
                        "reason": "句子过长，建议拆分",
                        "improvement": "提高可读性"
                    })

            elif issue.issue_type == FluencyIssueType.GRAMMAR_ERROR:
                # Suggest a grammar correction.
                corrected = self._suggest_grammar_correction(sentence)
                if corrected and corrected != sentence:
                    rewrite_suggestions.append({
                        "sentence_index": issue.sentence_index,
                        "original": sentence,
                        "suggested": corrected,
                        "reason": "语法错误修正",
                        "improvement": "提高语法正确性"
                    })

        return rewrite_suggestions

    def _suggest_sentence_split(self, sentence: str) -> str:
        """Suggest splitting a long sentence at a comma near its middle.

        Returns the split suggestion, or the sentence unchanged when no
        suitable split point exists.
        """
        # Candidate split points: commas with enough text on both sides.
        split_points = []

        for match in re.finditer(r'，', sentence):
            pos = match.start()
            # Keep at least 10 characters on each side of the split.
            if 10 < pos < len(sentence) - 10:
                split_points.append(pos)

        # FIX: one viable comma is already enough to propose a split
        # (was >= 2, which silently skipped single-comma long sentences).
        if len(split_points) >= 1:
            # Split at the middle candidate.
            mid_point = split_points[len(split_points) // 2]
            part1 = sentence[:mid_point]
            part2 = sentence[mid_point + 1:]
            return f"{part1}。{part2}"

        return sentence

    def _suggest_grammar_correction(self, sentence: str) -> str:
        """Apply simple regex-based grammar corrections to a sentence."""
        corrected = sentence

        # Correction rules applied in order.
        corrections = [
            (r'的的', '的'),
            (r'了了', '了'),
            (r'是是', '是'),
            (r'([，。！？；])\1+', r'\1'),  # collapse repeated identical punctuation
            # FIX: restrict to sentences with no "但" at all (mirrors the
            # detection pattern). The previous greedy (.{0,50}) also matched
            # across an existing "但是", appending a spurious "，但是" to
            # already-correct sentences whenever any rule triggered a rewrite.
            (r'虽然([^但]{0,50})$', r'虽然\1，但是'),  # supply the missing "但是"
        ]

        for pattern, replacement in corrections:
            corrected = re.sub(pattern, replacement, corrected)

        return corrected

    def _empty_result(self, reason: str) -> FluencyAnalysisResult:
        """Return a zero-score result carrying *reason* as a single issue."""
        return FluencyAnalysisResult(
            overall_score=0.0,
            metrics=FluencyMetrics(0, 0, 0, 0, 0, 0),
            sentence_analyses=[],
            issues=[FluencyIssue(-1, FluencyIssueType.UNCLEAR_EXPRESSION,
                               reason, "high", "")],
            suggestions=[],
            rewrite_suggestions=[]
        )