"""
自动校对优化器

提供语法和拼写检查、语言风格优化算法和逻辑一致性检查功能
"""

import logging
import json
import re
from typing import Dict, List, Optional, Any, Tuple, Union, Callable, Set
from dataclasses import dataclass, field
from enum import Enum
import datetime
from pathlib import Path
import string

from ..models.base_models import BaseModel
from .base_component import BaseComponent

# Module-level logger, named after this module per the stdlib logging convention.
logger = logging.getLogger(__name__)


class ErrorType(Enum):
    """Categories of issues the proofreader can flag in a text."""
    SPELLING = "spelling"                    # misspelled word
    GRAMMAR = "grammar"                      # grammatical error
    PUNCTUATION = "punctuation"              # punctuation error
    CAPITALIZATION = "capitalization"        # capitalization error
    WORD_CHOICE = "word_choice"              # questionable word choice
    SENTENCE_STRUCTURE = "sentence_structure" # sentence-structure problem
    TENSE_CONSISTENCY = "tense_consistency"   # inconsistent verb tense
    VOICE_CONSISTENCY = "voice_consistency"   # inconsistent voice (active/passive)
    STYLE_CONSISTENCY = "style_consistency"   # inconsistent writing style
    REDUNDANCY = "redundancy"                # redundant wording
    CLARITY = "clarity"                      # clarity issue
    CONCISENESS = "conciseness"              # conciseness issue
    FORMALITY = "formality"                  # formality issue
    ACADEMIC_TONE = "academic_tone"          # academic-tone issue
    LOGICAL_FLOW = "logical_flow"            # logical-flow issue
    COHERENCE = "coherence"                  # coherence issue
    CONSISTENCY = "consistency"              # general consistency issue


class SeverityLevel(Enum):
    """How serious a detected issue is, from cosmetic to must-fix."""
    LOW = "low"                             # minor / cosmetic
    MEDIUM = "medium"                       # should be fixed
    HIGH = "high"                           # important to fix
    CRITICAL = "critical"                   # must be fixed


class WritingStyle(Enum):
    """Target writing styles the optimizer can aim for."""
    ACADEMIC = "academic"                   # academic prose
    FORMAL = "formal"                       # formal register
    TECHNICAL = "technical"                 # technical writing
    SCIENTIFIC = "scientific"               # scientific writing
    BUSINESS = "business"                   # business writing
    CASUAL = "casual"                       # casual / informal


@dataclass
class ProofreadingError(BaseModel):
    """A single issue found while proofreading, with suggested fixes."""
    error_id: str = ""
    error_type: ErrorType = ErrorType.SPELLING
    severity: SeverityLevel = SeverityLevel.MEDIUM
    
    # Where the issue occurs in the source text.
    start_position: int = 0
    end_position: int = 0
    line_number: int = 0
    column_number: int = 0
    
    # The offending text and its surrounding context.
    original_text: str = ""
    context: str = ""
    
    # Candidate replacements; ``preferred_suggestion`` (when set) wins.
    suggestions: List[str] = field(default_factory=list)
    preferred_suggestion: Optional[str] = None
    explanation: str = ""
    
    # The rule that produced this error.
    rule_id: str = ""
    rule_description: str = ""
    
    # Detector confidence in [0, 1].
    confidence: float = 0.0
    
    # When the issue was detected.
    detected_time: datetime.datetime = field(default_factory=datetime.datetime.now)
    
    def get_corrected_text(self) -> str:
        """Return the best available replacement, falling back to the original text."""
        fallback = self.suggestions[0] if self.suggestions else self.original_text
        return self.preferred_suggestion or fallback


@dataclass
class StyleIssue(BaseModel):
    """A writing-style problem (as opposed to a hard spelling/grammar error)."""
    issue_id: str = ""
    issue_type: str = ""
    description: str = ""
    
    # Location of the affected span.
    start_position: int = 0
    end_position: int = 0
    affected_text: str = ""
    
    # How to improve it, optionally with an example rewrite.
    improvement_suggestion: str = ""
    example: str = ""
    
    # How important the issue is.
    importance: SeverityLevel = SeverityLevel.MEDIUM
    
    # Style scoring: current score vs. the expected score after the fix.
    target_style: WritingStyle = WritingStyle.ACADEMIC
    current_style_score: float = 0.0
    improved_style_score: float = 0.0


@dataclass
class ConsistencyIssue(BaseModel):
    """An inconsistency across the document (terminology, formats, tense, ...)."""
    issue_id: str = ""
    issue_type: str = ""
    description: str = ""
    
    # Concrete occurrences of the inconsistency.
    inconsistent_instances: List[Dict[str, Any]] = field(default_factory=list)
    
    # The single format the document should standardize on.
    recommended_format: str = ""
    
    # Sections of the document affected by the issue.
    affected_sections: List[str] = field(default_factory=list)
    
    # How serious the inconsistency is.
    severity: SeverityLevel = SeverityLevel.MEDIUM


@dataclass
class ProofreadingResult(BaseModel):
    """Outcome of a proofreading pass: findings, corrected text and metrics."""
    success: bool = True
    error_message: str = ""
    
    # Statistics of the input text.
    original_text: str = ""
    original_word_count: int = 0
    original_sentence_count: int = 0
    
    # Findings, grouped by kind.
    errors: List[ProofreadingError] = field(default_factory=list)
    style_issues: List[StyleIssue] = field(default_factory=list)
    consistency_issues: List[ConsistencyIssue] = field(default_factory=list)
    
    # The corrected text and its statistics.
    corrected_text: str = ""
    corrected_word_count: int = 0
    corrected_sentence_count: int = 0
    
    # Aggregate error statistics.
    total_errors: int = 0
    errors_by_type: Dict[str, int] = field(default_factory=dict)
    errors_by_severity: Dict[str, int] = field(default_factory=dict)
    
    # Quality assessment before and after correction.
    original_quality_score: float = 0.0
    improved_quality_score: float = 0.0
    improvement_percentage: float = 0.0
    
    # Processing metadata.
    processing_time_ms: float = 0.0
    rules_applied: List[str] = field(default_factory=list)
    
    def get_error_summary(self) -> Dict[str, Any]:
        """Summarize detected errors by count, type and severity."""
        most_common = None
        if self.errors_by_type:
            # First key with the maximal count (insertion order breaks ties).
            most_common = max(self.errors_by_type, key=self.errors_by_type.get)
        return {
            'total_errors': self.total_errors,
            'by_type': self.errors_by_type,
            'by_severity': self.errors_by_severity,
            'most_common_error': most_common,
            'critical_errors': self.errors_by_severity.get('critical', 0),
            'high_priority_errors': self.errors_by_severity.get('high', 0)
        }
    
    def get_improvement_summary(self) -> Dict[str, Any]:
        """Summarize how much the correction pass changed/improved the text."""
        fixed = sum(1 for err in self.errors if err.preferred_suggestion)
        return {
            'quality_improvement': self.improvement_percentage,
            'word_count_change': self.corrected_word_count - self.original_word_count,
            'sentence_count_change': self.corrected_sentence_count - self.original_sentence_count,
            'errors_fixed': fixed,
            'style_improvements': len(self.style_issues),
            'consistency_improvements': len(self.consistency_issues)
        }


@dataclass
class ProofreadingConfig(BaseModel):
    """Options controlling which checks run and how fixes are applied."""
    # Which check families to run.
    check_spelling: bool = True
    check_grammar: bool = True
    check_punctuation: bool = True
    check_style: bool = True
    check_consistency: bool = True
    check_logic: bool = True
    
    # Style targets.
    target_style: WritingStyle = WritingStyle.ACADEMIC
    formality_level: float = 0.8  # 0-1, where 1 is most formal
    
    # Minimum severity a finding must have to be reported.
    min_severity: SeverityLevel = SeverityLevel.LOW
    
    # Which error classes may be fixed automatically.
    auto_fix_spelling: bool = True
    auto_fix_punctuation: bool = True
    auto_fix_capitalization: bool = True
    
    # Language settings.
    language: str = "en"  # en, zh, etc.
    dialect: str = "US"   # US, UK, etc.
    
    # Discipline-specific settings.
    academic_field: str = ""  # computer_science, biology, etc.
    
    # Quality thresholds.
    min_confidence: float = 0.7
    max_suggestions: int = 5
    
    # Processing options.
    preserve_formatting: bool = True
    track_changes: bool = True


class ProofreadingOptimizer(BaseComponent):
    """自动校对优化器"""
    
    def get_required_configs(self) -> List[str]:
        """Return the configuration keys this component requires (none)."""
        return []
    
    def _setup_component(self):
        """Component-specific initialization: load word lists / rule tables and
        register the checker and optimizer dispatch tables."""
        self.logger.info("自动校对优化器初始化")
        
        # Load the dictionaries and rules the checkers below rely on.
        self._load_dictionaries()
        self._load_grammar_rules()
        self._load_style_rules()
        
        # Dispatch table: error type -> checker method. Some handlers
        # (e.g. _check_capitalization, _check_word_choice) are reachable only
        # through this table, not via proofread_text directly.
        self.checkers = {
            ErrorType.SPELLING: self._check_spelling,
            ErrorType.GRAMMAR: self._check_grammar,
            ErrorType.PUNCTUATION: self._check_punctuation,
            ErrorType.CAPITALIZATION: self._check_capitalization,
            ErrorType.WORD_CHOICE: self._check_word_choice,
            ErrorType.SENTENCE_STRUCTURE: self._check_sentence_structure,
            ErrorType.TENSE_CONSISTENCY: self._check_tense_consistency,
            ErrorType.STYLE_CONSISTENCY: self._check_style_consistency,
            ErrorType.REDUNDANCY: self._check_redundancy,
            ErrorType.CLARITY: self._check_clarity,
            ErrorType.ACADEMIC_TONE: self._check_academic_tone
        }
        
        # Style optimizers; optimize_writing_style iterates this dict in
        # insertion order, so the order here is significant.
        self.optimizers = {
            'conciseness': self._optimize_conciseness,
            'clarity': self._optimize_clarity,
            'flow': self._optimize_flow,
            'tone': self._optimize_tone,
            'consistency': self._optimize_consistency
        }
        
        self.logger.info("自动校对优化器初始化完成")

    def proofread_text(self, 
                      text: str, 
                      config: Optional[ProofreadingConfig] = None) -> ProofreadingResult:
        """
        Proofread ``text`` and return findings, corrections and quality metrics.
        
        Args:
            text: Input text to check.
            config: Proofreading options; a default ``ProofreadingConfig`` is
                used when omitted. (Annotation fixed to ``Optional[...]`` —
                the previous ``ProofreadingConfig = None`` was an implicit
                Optional, deprecated by PEP 484.)
            
        Returns:
            A ``ProofreadingResult`` with detected errors, the corrected text
            and before/after quality scores. On any exception, ``success`` is
            False and ``error_message`` carries the reason.
        """
        try:
            start_time = datetime.datetime.now()
            
            if config is None:
                config = ProofreadingConfig()
            
            # Seed the result with basic statistics of the input.
            result = ProofreadingResult(
                original_text=text,
                original_word_count=len(text.split()),
                original_sentence_count=len(self._split_sentences(text))
            )
            
            all_errors = []
            
            # Spelling.
            if config.check_spelling:
                all_errors.extend(self._check_spelling(text, config))
                result.rules_applied.append("spelling_check")
            
            # Grammar.
            if config.check_grammar:
                all_errors.extend(self._check_grammar(text, config))
                result.rules_applied.append("grammar_check")
            
            # Punctuation.
            if config.check_punctuation:
                all_errors.extend(self._check_punctuation(text, config))
                result.rules_applied.append("punctuation_check")
            
            # Style issues are reported separately from hard errors.
            if config.check_style:
                result.style_issues = self._check_style(text, config)
                result.rules_applied.append("style_check")
            
            # Consistency issues likewise.
            if config.check_consistency:
                result.consistency_issues = self._check_consistency(text, config)
                result.rules_applied.append("consistency_check")
            
            # Drop filtered-out findings, then order by position in the text.
            filtered_errors = self._filter_errors(all_errors, config)
            result.errors = sorted(filtered_errors, key=lambda e: e.start_position)
            
            # Apply the corrections and recompute text statistics.
            result.corrected_text = self._apply_corrections(text, result.errors, config)
            result.corrected_word_count = len(result.corrected_text.split())
            result.corrected_sentence_count = len(self._split_sentences(result.corrected_text))
            
            # Aggregate error statistics.
            result.total_errors = len(result.errors)
            result.errors_by_type = self._count_errors_by_type(result.errors)
            result.errors_by_severity = self._count_errors_by_severity(result.errors)
            
            # Quality before/after and relative improvement (guarded against
            # division by zero when the original score is 0).
            result.original_quality_score = self._calculate_quality_score(text, config)
            result.improved_quality_score = self._calculate_quality_score(result.corrected_text, config)
            result.improvement_percentage = (
                (result.improved_quality_score - result.original_quality_score) / 
                result.original_quality_score * 100
                if result.original_quality_score > 0 else 0
            )
            
            # Wall-clock processing time in milliseconds.
            processing_time = (datetime.datetime.now() - start_time).total_seconds() * 1000
            result.processing_time_ms = processing_time
            
            self.logger.info(f"文本校对完成: 发现 {result.total_errors} 个错误 (耗时: {processing_time:.2f}ms)")
            return result
            
        except Exception as e:
            self.logger.error(f"文本校对失败: {str(e)}")
            return ProofreadingResult(
                success=False,
                error_message=str(e),
                original_text=text
            )
    
    def optimize_writing_style(self, 
                             text: str,
                             target_style: WritingStyle = WritingStyle.ACADEMIC,
                             config: Optional[ProofreadingConfig] = None) -> ProofreadingResult:
        """
        Optimize the writing style of ``text`` towards ``target_style``.
        
        Runs a full proofreading pass first, then pipes the corrected text
        through every registered style optimizer.
        
        Args:
            text: Input text.
            target_style: Style to optimize towards.
            config: Optional proofreading options. The caller's object is no
                longer mutated: the previous code set ``target_style`` and
                ``check_style`` directly on the argument, a surprising side
                effect; we now work on a shallow copy.
            
        Returns:
            A ``ProofreadingResult`` whose ``corrected_text`` reflects the
            style optimizations; ``success`` is False on failure.
        """
        try:
            import copy
            
            if config is None:
                config = ProofreadingConfig()
            else:
                # Shallow copy so the caller's config is left untouched.
                config = copy.copy(config)
            
            config.target_style = target_style
            config.check_style = True
            
            # Baseline proofreading pass.
            result = self.proofread_text(text, config)
            
            # Chain the style optimizers over the corrected text; a failing
            # optimizer is skipped rather than aborting the whole chain.
            optimized_text = result.corrected_text
            for optimizer_name, optimizer_func in self.optimizers.items():
                try:
                    optimized_text = optimizer_func(optimized_text, target_style, config)
                    result.rules_applied.append(f"style_optimization_{optimizer_name}")
                except Exception as e:
                    self.logger.warning(f"风格优化 {optimizer_name} 失败: {str(e)}")
            
            # Refresh text statistics with the optimized output.
            result.corrected_text = optimized_text
            result.corrected_word_count = len(optimized_text.split())
            result.corrected_sentence_count = len(self._split_sentences(optimized_text))
            
            # Recompute the quality score and relative improvement.
            result.improved_quality_score = self._calculate_quality_score(optimized_text, config)
            result.improvement_percentage = (
                (result.improved_quality_score - result.original_quality_score) / 
                result.original_quality_score * 100
                if result.original_quality_score > 0 else 0
            )
            
            return result
            
        except Exception as e:
            self.logger.error(f"写作风格优化失败: {str(e)}")
            return ProofreadingResult(
                success=False,
                error_message=str(e),
                original_text=text
            )
    
    def check_logical_consistency(self, 
                                text: str,
                                config: Optional[ProofreadingConfig] = None) -> List[ConsistencyIssue]:
        """
        Check the text for logical/formal consistency problems.
        
        Args:
            text: Input text.
            config: Optional proofreading options (annotation fixed to
                ``Optional[...]``; it previously declared an implicit Optional).
            
        Returns:
            The collected consistency issues; empty on failure.
        """
        try:
            if config is None:
                config = ProofreadingConfig()
            
            consistency_issues = []
            
            # Tense consistency.
            # NOTE(review): _check_tense_consistency returns ProofreadingError
            # objects, not ConsistencyIssue, so the declared return type is not
            # strictly honored here — confirm whether callers depend on it.
            tense_issues = self._check_tense_consistency(text, config)
            consistency_issues.extend(tense_issues)
            
            # Terminology consistency.
            term_issues = self._check_terminology_consistency(text, config)
            consistency_issues.extend(term_issues)
            
            # Citation-format consistency.
            citation_issues = self._check_citation_consistency(text, config)
            consistency_issues.extend(citation_issues)
            
            # Number-format consistency.
            number_issues = self._check_number_format_consistency(text, config)
            consistency_issues.extend(number_issues)
            
            return consistency_issues
            
        except Exception as e:
            self.logger.error(f"逻辑一致性检查失败: {str(e)}")
            return []
    
    def generate_improvement_suggestions(self, 
                                       result: ProofreadingResult) -> List[Dict[str, Any]]:
        """
        Derive a prioritized list of improvement suggestions from a
        proofreading result.
        
        Args:
            result: The proofreading result to analyze.
            
        Returns:
            Suggestion dicts with 'type', 'priority', 'title', 'description'
            and 'action' keys; empty on failure.
        """
        suggestions: List[Dict[str, Any]] = []
        
        try:
            summary = result.get_error_summary()
            
            # Critical errors come first.
            critical = summary['critical_errors']
            if critical > 0:
                suggestions.append({
                    'type': 'critical',
                    'priority': 'high',
                    'title': '严重错误需要立即修正',
                    'description': f'发现 {critical} 个严重错误，建议优先处理',
                    'action': '检查并修正所有标记为严重的错误'
                })
            
            # Then the most frequent error category, if any.
            most_common = summary['most_common_error']
            if most_common:
                count = summary['by_type'][most_common]
                suggestions.append({
                    'type': 'pattern',
                    'priority': 'medium',
                    'title': f'频繁出现的{most_common}错误',
                    'description': f'{most_common}错误出现了 {count} 次，建议重点关注',
                    'action': f'系统性地检查和修正{most_common}问题'
                })
            
            # One suggestion per style-issue category.
            counts_by_style: Dict[str, int] = {}
            for issue in result.style_issues:
                counts_by_style[issue.issue_type] = counts_by_style.get(issue.issue_type, 0) + 1
            for style_type, count in counts_by_style.items():
                suggestions.append({
                    'type': 'style',
                    'priority': 'low',
                    'title': f'{style_type}风格改进',
                    'description': f'发现 {count} 个{style_type}相关的风格问题',
                    'action': f'考虑改进{style_type}以提升文本质量'
                })
            
            # Flag low overall quality.
            if result.original_quality_score < 0.7:
                suggestions.append({
                    'type': 'quality',
                    'priority': 'high',
                    'title': '整体质量需要提升',
                    'description': f'当前质量评分为 {result.original_quality_score:.2f}，建议进行全面改进',
                    'action': '考虑重新组织内容结构，改进语言表达'
                })
            
            # Positive feedback for a large measured improvement.
            if result.improvement_percentage > 20:
                suggestions.append({
                    'type': 'success',
                    'priority': 'info',
                    'title': '显著改进',
                    'description': f'质量提升了 {result.improvement_percentage:.1f}%，效果显著',
                    'action': '继续保持当前的改进方向'
                })
            
            return suggestions
            
        except Exception as e:
            self.logger.error(f"生成改进建议失败: {str(e)}")
            return []
    
    # 词典和规则加载
    def _load_dictionaries(self):
        """Load the word lists used by the spelling and word-choice checkers."""
        # Common misspelling -> correct spelling (used by _check_spelling).
        self.common_misspellings = {
            'recieve': 'receive',
            'seperate': 'separate',
            'definately': 'definitely',
            'occured': 'occurred',
            'begining': 'beginning',
            'existance': 'existence',
            'independant': 'independent',
            'neccessary': 'necessary',
            'accomodate': 'accommodate',
            'embarass': 'embarrass'
        }
        
        # Vocabulary considered appropriately academic.
        self.academic_vocabulary = {
            'analyze', 'examine', 'investigate', 'demonstrate', 'establish',
            'indicate', 'suggest', 'propose', 'conclude', 'determine',
            'evaluate', 'assess', 'compare', 'contrast', 'identify',
            'significant', 'substantial', 'considerable', 'notable', 'evident',
            'furthermore', 'moreover', 'however', 'nevertheless', 'consequently'
        }
        
        # Informal phrasing -> preferred formal replacement (used by
        # _check_word_choice).
        self.informal_to_formal = {
            'a lot of': 'many',
            'lots of': 'numerous',
            'big': 'significant',
            'small': 'minimal',
            'get': 'obtain',
            'show': 'demonstrate',
            'find out': 'determine',
            'look at': 'examine',
            'think about': 'consider',
            'talk about': 'discuss'
        }
    
    def _load_grammar_rules(self):
        """Load regex-based grammar rules: subject-verb agreement, tense
        classification and article usage."""
        # Subject-verb agreement patterns -> rule identifiers.
        self.subject_verb_patterns = [
            (r'\b(\w+s)\s+(are|were)\b', 'plural_subject_plural_verb'),
            (r'\b(\w+)\s+(is|was)\b', 'singular_subject_singular_verb'),
            (r'\bdata\s+(is|was)\b', 'data_singular_error'),  # 'data' takes a plural verb
            (r'\bcriteria\s+(is|was)\b', 'criteria_singular_error')  # 'criteria' takes a plural verb
        ]
        
        # Patterns used to classify verb-tense occurrences.
        self.tense_patterns = [
            (r'\b(will|shall)\s+\w+', 'future_tense'),
            (r'\b\w+ed\b', 'past_tense'),
            (r'\b(is|are|am)\s+\w+ing\b', 'present_continuous'),
            (r'\b(was|were)\s+\w+ing\b', 'past_continuous')
        ]
        
        # Article misuse patterns.
        # NOTE(review): these are letter-based heuristics — sound-based
        # exceptions such as 'a university' or 'an hour' will be misflagged.
        self.article_patterns = [
            (r'\ba\s+[aeiou]', 'a_before_vowel_error'),  # 'a' should be 'an'
            (r'\ban\s+[bcdfghjklmnpqrstvwxyz]', 'an_before_consonant_error')  # 'an' should be 'a'
        ]
    
    def _load_style_rules(self):
        """Load style rules: passive-voice patterns, redundant phrases and
        words to avoid in academic writing."""
        # Heuristic passive-voice patterns (be-verb + past participle).
        self.passive_voice_patterns = [
            r'\b(is|are|was|were|been|being)\s+\w+ed\b',
            r'\b(is|are|was|were|been|being)\s+\w+en\b'
        ]
        
        # Wordy phrase -> concise equivalent (used by _check_redundancy).
        self.redundant_phrases = {
            'in order to': 'to',
            'due to the fact that': 'because',
            'at this point in time': 'now',
            'in the event that': 'if',
            'for the purpose of': 'for',
            'with regard to': 'regarding',
            'in spite of the fact that': 'although'
        }
        
        # Words/phrases discouraged in academic writing
        # (used by _check_style_consistency).
        self.avoid_in_academic = {
            'very', 'really', 'quite', 'pretty', 'totally',
            'awesome', 'amazing', 'incredible', 'fantastic',
            'I think', 'I believe', 'I feel', 'in my opinion'
        }
    
    # 检查方法
    def _check_spelling(self, text: str, config: ProofreadingConfig) -> List[ProofreadingError]:
        """Detect common misspellings from the built-in misspelling dictionary.
        
        Matching is case-insensitive, and the suggested correction now
        preserves an initial capital letter (the previous code suggested the
        lowercase dictionary value for e.g. 'Recieve', which would downcase a
        sentence-initial word when auto-fixed).
        """
        errors = []
        
        for misspelling, correction in self.common_misspellings.items():
            pattern = r'\b' + re.escape(misspelling) + r'\b'
            for match in re.finditer(pattern, text, re.IGNORECASE):
                matched = match.group()
                # Preserve capitalization: 'Recieve' -> 'Receive'.
                fixed = correction
                if matched[:1].isupper():
                    fixed = correction[:1].upper() + correction[1:]
                error = ProofreadingError(
                    error_id=f"spell_{match.start()}",
                    error_type=ErrorType.SPELLING,
                    severity=SeverityLevel.MEDIUM,
                    start_position=match.start(),
                    end_position=match.end(),
                    original_text=matched,
                    suggestions=[fixed],
                    preferred_suggestion=fixed,
                    explanation=f"'{matched}' 是常见的拼写错误",
                    confidence=0.9
                )
                errors.append(error)
        
        return errors
    
    def _check_grammar(self, text: str, config: ProofreadingConfig) -> List[ProofreadingError]:
        """Detect grammar errors: plural nouns ('data', 'criteria') paired
        with singular verbs, and 'a'/'an' article misuse.
        
        Fixes over the previous version: the 'criteria_singular_error' rule is
        now actually reported (it was defined in _load_grammar_rules but never
        handled), and the article suggestion is built positionally so it also
        works for an uppercase 'A' and for any whitespace separator.
        """
        errors = []
        
        # Subject-verb agreement for nouns that are grammatically plural.
        plural_noun_rules = {
            'data_singular_error': "'data' 是复数名词，应该使用复数动词",
            'criteria_singular_error': "'criteria' 是复数名词，应该使用复数动词",
        }
        for pattern, rule_id in self.subject_verb_patterns:
            if rule_id not in plural_noun_rules:
                continue
            for match in re.finditer(pattern, text, re.IGNORECASE):
                error = ProofreadingError(
                    error_id=f"grammar_{match.start()}",
                    error_type=ErrorType.GRAMMAR,
                    severity=SeverityLevel.HIGH,
                    start_position=match.start(),
                    end_position=match.end(),
                    original_text=match.group(),
                    suggestions=[match.group().replace('is', 'are').replace('was', 'were')],
                    explanation=plural_noun_rules[rule_id],
                    rule_id=rule_id,
                    confidence=0.8
                )
                errors.append(error)
        
        # Article usage: 'a' before a vowel should be 'an'.
        # TODO(review): 'an_before_consonant_error' is still detected by the
        # pattern table but not reported here.
        for pattern, rule_id in self.article_patterns:
            for match in re.finditer(pattern, text, re.IGNORECASE):
                if rule_id == 'a_before_vowel_error':
                    matched = match.group()
                    # Insert the 'n' right after the article character so the
                    # fix preserves case and the original whitespace.
                    suggestion = matched[0] + 'n' + matched[1:]
                    error = ProofreadingError(
                        error_id=f"grammar_{match.start()}",
                        error_type=ErrorType.GRAMMAR,
                        severity=SeverityLevel.MEDIUM,
                        start_position=match.start(),
                        end_position=match.end(),
                        original_text=matched,
                        suggestions=[suggestion],
                        preferred_suggestion=suggestion,
                        explanation="元音前应使用 'an' 而不是 'a'",
                        rule_id=rule_id,
                        confidence=0.9
                    )
                    errors.append(error)
        
        return errors
    
    def _check_punctuation(self, text: str, config: ProofreadingConfig) -> List[ProofreadingError]:
        """Detect spacing/punctuation problems: doubled spaces, a space before
        a period, and a comma not followed by a space.
        
        The comma rule now excludes a following digit so thousands separators
        like '1,000' are no longer false positives, and it sets
        ``preferred_suggestion`` like the other punctuation rules do.
        """
        errors = []
        
        # Runs of two or more spaces.
        for match in re.finditer(r'  +', text):
            error = ProofreadingError(
                error_id=f"punct_{match.start()}",
                error_type=ErrorType.PUNCTUATION,
                severity=SeverityLevel.LOW,
                start_position=match.start(),
                end_position=match.end(),
                original_text=match.group(),
                suggestions=[' '],
                preferred_suggestion=' ',
                explanation="多余的空格",
                confidence=1.0
            )
            errors.append(error)
        
        # Space(s) before a period.
        for match in re.finditer(r' +\.', text):
            error = ProofreadingError(
                error_id=f"punct_{match.start()}",
                error_type=ErrorType.PUNCTUATION,
                severity=SeverityLevel.MEDIUM,
                start_position=match.start(),
                end_position=match.end(),
                original_text=match.group(),
                suggestions=['.'],
                preferred_suggestion='.',
                explanation="句号前不应有空格",
                confidence=0.9
            )
            errors.append(error)
        
        # Comma not followed by whitespace (digits excluded — see docstring).
        for match in re.finditer(r',[^\s\d]', text):
            suggestion = match.group()[0] + ' ' + match.group()[1]
            error = ProofreadingError(
                error_id=f"punct_{match.start()}",
                error_type=ErrorType.PUNCTUATION,
                severity=SeverityLevel.MEDIUM,
                start_position=match.start(),
                end_position=match.end(),
                original_text=match.group(),
                suggestions=[suggestion],
                preferred_suggestion=suggestion,
                explanation="逗号后应有空格",
                confidence=0.8
            )
            errors.append(error)
        
        return errors
    
    def _check_capitalization(self, text: str, config: ProofreadingConfig) -> List[ProofreadingError]:
        """Detect sentences that start with a lowercase letter.
        
        Fixes over the previous version: the sentence is located once per
        iteration (it was looked up twice), a failed ``text.find`` no longer
        corrupts the running cursor with ``-1 + len(sentence)``, and
        ``islower()`` is used instead of ``not isupper()`` so sentences that
        open with digits or punctuation are not flagged.
        """
        errors = []
        
        current_pos = 0  # search cursor; keeps duplicate sentences in order
        for sentence in self._split_sentences(text):
            stripped = sentence.strip()
            if not stripped:
                continue
            
            sentence_start = text.find(stripped, current_pos)
            if sentence_start == -1:
                # Could not locate the (stripped) sentence — skip it rather
                # than corrupting the cursor for the following sentences.
                continue
            
            if stripped[0].islower():
                error = ProofreadingError(
                    error_id=f"cap_{sentence_start}",
                    error_type=ErrorType.CAPITALIZATION,
                    severity=SeverityLevel.MEDIUM,
                    start_position=sentence_start,
                    end_position=sentence_start + 1,
                    original_text=stripped[0],
                    suggestions=[stripped[0].upper()],
                    preferred_suggestion=stripped[0].upper(),
                    explanation="句首字母应该大写",
                    confidence=0.9
                )
                errors.append(error)
            
            current_pos = sentence_start + len(stripped)
        
        return errors
    
    def _check_word_choice(self, text: str, config: ProofreadingConfig) -> List[ProofreadingError]:
        """Flag informal wording and suggest the formal academic equivalent."""
        errors = []
        
        for informal, formal in self.informal_to_formal.items():
            informal_re = re.compile(r'\b' + re.escape(informal) + r'\b', re.IGNORECASE)
            for match in informal_re.finditer(text):
                errors.append(ProofreadingError(
                    error_id=f"word_{match.start()}",
                    error_type=ErrorType.WORD_CHOICE,
                    severity=SeverityLevel.LOW,
                    start_position=match.start(),
                    end_position=match.end(),
                    original_text=match.group(),
                    suggestions=[formal],
                    preferred_suggestion=formal,
                    explanation=f"在学术写作中，建议使用 '{formal}' 而不是 '{informal}'",
                    confidence=0.7
                ))
        
        return errors
    
    def _check_sentence_structure(self, text: str, config: ProofreadingConfig) -> List[ProofreadingError]:
        """Flag overly long sentences (more than 30 words) that may hurt
        readability.
        
        Positions are now tracked with a running search cursor; the previous
        ``text.find(sentence)`` with no offset pinned every duplicate sentence
        to its first occurrence. The unused enumerate index was dropped.
        """
        errors = []
        
        search_pos = 0  # running cursor for locating successive sentences
        for sentence in self._split_sentences(text):
            sentence = sentence.strip()
            if not sentence:
                continue
            
            # Locate this sentence after the previous one; fall back to a
            # whole-text search if that fails.
            sentence_start = text.find(sentence, search_pos)
            if sentence_start == -1:
                sentence_start = text.find(sentence)
            if sentence_start != -1:
                search_pos = sentence_start + len(sentence)
            
            words = sentence.split()
            if len(words) > 30:
                error = ProofreadingError(
                    error_id=f"struct_{sentence_start}",
                    error_type=ErrorType.SENTENCE_STRUCTURE,
                    severity=SeverityLevel.MEDIUM,
                    start_position=sentence_start,
                    end_position=sentence_start + len(sentence),
                    original_text=sentence,
                    suggestions=["考虑将长句分解为多个短句"],
                    explanation=f"句子过长 ({len(words)} 个单词)，可能影响可读性",
                    confidence=0.6
                )
                errors.append(error)
        
        return errors
    
    def _check_tense_consistency(self, text: str, config: ProofreadingConfig) -> List[ProofreadingError]:
        """Heuristically flag the document when no single verb tense clearly
        dominates (mixed tenses suggest an inconsistency).
        
        Fixes over the previous version: zero counts are no longer stored, so
        the multi-tense condition is not trivially true (the dict always had
        one key per pattern), and text with no tense matches at all can no
        longer raise ZeroDivisionError on ``dominant / total``.
        """
        errors = []
        
        # Count only tense types that actually occur in the text.
        tense_counts = {}
        for pattern, tense_type in self.tense_patterns:
            count = len(re.findall(pattern, text, re.IGNORECASE))
            if count:
                tense_counts[tense_type] = count
        
        # At least two distinct tenses present -> check for a dominant one.
        if len(tense_counts) > 1:
            total_tenses = sum(tense_counts.values())
            dominant_count = max(tense_counts.values())
            
            if dominant_count / total_tenses < 0.7:  # no clearly dominant tense
                error = ProofreadingError(
                    error_id="tense_consistency",
                    error_type=ErrorType.TENSE_CONSISTENCY,
                    severity=SeverityLevel.MEDIUM,
                    start_position=0,
                    end_position=len(text),
                    original_text="整个文档",
                    suggestions=["检查并统一时态使用"],
                    explanation="文档中使用了多种时态，建议保持一致性",
                    confidence=0.5
                )
                errors.append(error)
        
        return errors
    
    def _check_style_consistency(self, text: str, config: ProofreadingConfig) -> List[ProofreadingError]:
        """Flag words and phrases that are discouraged in academic writing."""
        errors = []
        
        for avoid_word in self.avoid_in_academic:
            word_re = re.compile(r'\b' + re.escape(avoid_word) + r'\b', re.IGNORECASE)
            for match in word_re.finditer(text):
                errors.append(ProofreadingError(
                    error_id=f"style_{match.start()}",
                    error_type=ErrorType.STYLE_CONSISTENCY,
                    severity=SeverityLevel.LOW,
                    start_position=match.start(),
                    end_position=match.end(),
                    original_text=match.group(),
                    suggestions=["考虑使用更正式的表达"],
                    explanation=f"'{avoid_word}' 在学术写作中应避免使用",
                    confidence=0.6
                ))
        
        return errors
    
    def _check_redundancy(self, text: str, config: ProofreadingConfig) -> List[ProofreadingError]:
        """Flag redundant phrases and suggest their concise replacements."""
        errors = []

        # Each known wordy phrase maps to a concise alternative.
        for wordy_phrase, concise_form in self.redundant_phrases.items():
            phrase_re = re.compile(r'\b' + re.escape(wordy_phrase) + r'\b', re.IGNORECASE)
            for hit in phrase_re.finditer(text):
                errors.append(ProofreadingError(
                    error_id=f"redundant_{hit.start()}",
                    error_type=ErrorType.REDUNDANCY,
                    severity=SeverityLevel.LOW,
                    start_position=hit.start(),
                    end_position=hit.end(),
                    original_text=hit.group(),
                    suggestions=[concise_form],
                    preferred_suggestion=concise_form,
                    explanation=f"'{wordy_phrase}' 可以简化为 '{concise_form}'",
                    confidence=0.8
                ))

        return errors
    
    def _check_clarity(self, text: str, config: ProofreadingConfig) -> List[ProofreadingError]:
        """Flag overuse of passive voice (more than 30% of sentences)."""
        errors = []

        sentence_total = len(self._split_sentences(text))

        # Total passive-voice matches across all configured patterns.
        passive_hits = sum(
            len(re.findall(pattern, text, re.IGNORECASE))
            for pattern in self.passive_voice_patterns
        )

        if sentence_total > 0 and passive_hits / sentence_total > 0.3:
            errors.append(ProofreadingError(
                error_id="passive_voice_overuse",
                error_type=ErrorType.CLARITY,
                severity=SeverityLevel.MEDIUM,
                start_position=0,
                end_position=len(text),
                original_text="整个文档",
                suggestions=["考虑将部分被动语态改为主动语态"],
                explanation=f"被动语态使用过多 ({passive_hits}/{sentence_total})，可能影响清晰度",
                confidence=0.6
            ))

        return errors
    
    def _check_academic_tone(self, text: str, config: ProofreadingConfig) -> List[ProofreadingError]:
        """Flag heavy first-person usage (over 2% of all words)."""
        errors = []

        # Count first-person pronouns across the whole document.
        first_person_hits = sum(
            len(re.findall(pattern, text, re.IGNORECASE))
            for pattern in (r'\bI\b', r'\bwe\b', r'\bour\b', r'\bmy\b')
        )

        word_count = len(text.split())
        if word_count > 0 and first_person_hits / word_count > 0.02:  # above 2%
            errors.append(ProofreadingError(
                error_id="first_person_overuse",
                error_type=ErrorType.ACADEMIC_TONE,
                severity=SeverityLevel.LOW,
                start_position=0,
                end_position=len(text),
                original_text="整个文档",
                suggestions=["考虑减少第一人称的使用，采用更客观的表达"],
                explanation="学术写作中应尽量避免过多使用第一人称",
                confidence=0.5
            ))

        return errors
  
    # 风格和一致性检查方法
    def _check_style(self, text: str, config: ProofreadingConfig) -> List[StyleIssue]:
        """Detect style problems: sentence variety, paragraph development, transitions."""
        style_issues = []

        try:
            sentences = self._split_sentences(text)
            sentence_lengths = [len(s.split()) for s in sentences if s.strip()]

            # 1) Sentence variety: too many sentences far above the average length.
            if sentence_lengths:
                mean_length = sum(sentence_lengths) / len(sentence_lengths)
                overlong = sum(1 for n in sentence_lengths if n > mean_length * 1.5)
                if overlong / len(sentence_lengths) > 0.3:
                    style_issues.append(StyleIssue(
                        issue_id="sentence_length_variation",
                        issue_type="sentence_structure",
                        description="句子长度缺乏变化，可能影响阅读体验",
                        improvement_suggestion="尝试混合使用长短句，增加文本的节奏感",
                        importance=SeverityLevel.LOW,
                        target_style=config.target_style,
                        current_style_score=0.6,
                        improved_style_score=0.8
                    ))

            # 2) Paragraph development: too many very short paragraphs.
            paragraph_lengths = [len(p.split()) for p in text.split('\n\n') if p.strip()]
            if paragraph_lengths:
                too_short = sum(1 for n in paragraph_lengths if n < 30)
                if too_short / len(paragraph_lengths) > 0.4:
                    style_issues.append(StyleIssue(
                        issue_id="paragraph_structure",
                        issue_type="paragraph_development",
                        description="段落过短，可能缺乏充分的论述",
                        improvement_suggestion="考虑扩展段落内容，提供更详细的解释和例证",
                        importance=SeverityLevel.MEDIUM,
                        target_style=config.target_style
                    ))

            # 3) Coherence: density of transition words relative to sentence count.
            transition_words = [
                'however', 'furthermore', 'moreover', 'nevertheless', 'consequently',
                'therefore', 'thus', 'hence', 'additionally', 'similarly'
            ]
            transition_count = sum(
                len(re.findall(r'\b' + word + r'\b', text, re.IGNORECASE))
                for word in transition_words
            )
            if len(sentences) > 5 and transition_count / len(sentences) < 0.1:
                style_issues.append(StyleIssue(
                    issue_id="transition_words",
                    issue_type="coherence",
                    description="缺乏足够的过渡词，可能影响文本连贯性",
                    improvement_suggestion="适当添加过渡词来改善句子和段落之间的连接",
                    importance=SeverityLevel.MEDIUM,
                    target_style=config.target_style
                ))

            return style_issues

        except Exception as e:
            self.logger.error(f"风格检查失败: {str(e)}")
            return style_issues
    
    def _check_consistency(self, text: str, config: ProofreadingConfig) -> List[ConsistencyIssue]:
        """Run all consistency checks and aggregate their findings."""
        consistency_issues = []

        try:
            # Run each specialized checker in order: terminology, citation
            # format, number format, then abbreviation usage.
            checkers = (
                self._check_terminology_consistency,
                self._check_citation_consistency,
                self._check_number_format_consistency,
                self._check_abbreviation_consistency,
            )
            for checker in checkers:
                consistency_issues.extend(checker(text, config))

            return consistency_issues

        except Exception as e:
            self.logger.error(f"一致性检查失败: {str(e)}")
            return consistency_issues
    
    def _check_terminology_consistency(self, text: str, config: ProofreadingConfig) -> List[ConsistencyIssue]:
        """Find capitalized terms that appear with inconsistent spellings."""
        issues = []

        # Group capitalized words/phrases under a normalized (lowercase,
        # whitespace-free) key so spelling variants land in the same bucket.
        variant_map = {}
        for term in re.findall(r'\b[A-Z][a-z]+(?:\s+[A-Z][a-z]+)*\b', text):
            key = term.lower().replace(' ', '')
            variant_map.setdefault(key, []).append(term)

        for key, occurrences in variant_map.items():
            distinct = set(occurrences)
            # Flag only terms with several spellings that occur more than twice.
            if len(distinct) > 1 and len(occurrences) > 2:
                issues.append(ConsistencyIssue(
                    issue_id=f"terminology_{key}",
                    issue_type="terminology",
                    description=f"术语 '{key}' 存在不一致的表达方式",
                    inconsistent_instances=[
                        {'term': variant, 'count': occurrences.count(variant)}
                        for variant in distinct
                    ],
                    recommended_format=max(distinct, key=occurrences.count),
                    severity=SeverityLevel.MEDIUM
                ))

        return issues
    
    def _check_citation_consistency(self, text: str, config: ProofreadingConfig) -> List[ConsistencyIssue]:
        """Detect documents that mix more than one citation format."""
        issues = []

        # Known citation styles and the regex that recognizes each one.
        citation_patterns = [
            (r'\[(\d+)\]', 'numeric_brackets'),
            (r'\(([^)]+\d{4}[^)]*)\)', 'author_year_parentheses'),
            (r'([A-Z][a-z]+\s+et\s+al\.,?\s+\d{4})', 'author_year_inline'),
            (r'<sup>(\d+)</sup>', 'superscript_numeric')
        ]

        found_formats = {}
        for pattern, format_name in citation_patterns:
            hit_count = len(re.findall(pattern, text))
            if hit_count:
                found_formats[format_name] = hit_count

        # More than one format in use -> recommend the most frequent one.
        if len(found_formats) > 1:
            issues.append(ConsistencyIssue(
                issue_id="citation_format",
                issue_type="citation_consistency",
                description="文档中使用了多种引用格式",
                inconsistent_instances=[
                    {'format': fmt, 'count': count}
                    for fmt, count in found_formats.items()
                ],
                recommended_format=max(found_formats.items(), key=lambda item: item[1])[0],
                severity=SeverityLevel.HIGH
            ))

        return issues
    
    def _check_number_format_consistency(self, text: str, config: ProofreadingConfig) -> List[ConsistencyIssue]:
        """Detect mixed spelled-out vs digit representations of small numbers."""
        issues = []

        spelled = re.findall(r'\b(one|two|three|four|five|six|seven|eight|nine|ten)\b', text, re.IGNORECASE)
        digits = re.findall(r'\b(\d+)\b', text)
        small_digits = [d for d in digits if int(d) <= 10]

        # Only flag when spelled-out numbers and small digit numbers coexist.
        if spelled and digits and small_digits:
            issues.append(ConsistencyIssue(
                issue_id="number_format",
                issue_type="number_consistency",
                description="小数字的表示方式不一致",
                inconsistent_instances=[
                    {'type': 'spelled', 'count': len(spelled)},
                    {'type': 'digits', 'count': len(small_digits)}
                ],
                recommended_format="在学术写作中，10以下的数字通常应该拼写",
                severity=SeverityLevel.LOW
            ))

        return issues
    
    def _check_abbreviation_consistency(self, text: str, config: ProofreadingConfig) -> List[ConsistencyIssue]:
        """Check that repeated abbreviations are introduced with a full form."""
        issues = []

        # Count occurrences of each all-caps token (2+ letters).
        abbr_counts = {}
        for token in re.findall(r'\b([A-Z]{2,})\b', text):
            abbr_counts[token] = abbr_counts.get(token, 0) + 1

        for abbr, occurrences in abbr_counts.items():
            if occurrences <= 1:
                continue  # one-off abbreviations are not flagged

            # Look through the sentence fragments containing the abbreviation
            # for a "Full Name (ABBR)"-style definition.
            containing_spans = re.findall(r'([^.!?]*\b' + abbr + r'\b[^.!?]*)', text)
            if not any('(' + abbr + ')' in span for span in containing_spans):
                issues.append(ConsistencyIssue(
                    issue_id=f"abbreviation_{abbr}",
                    issue_type="abbreviation_definition",
                    description=f"缩写 '{abbr}' 首次出现时应给出全称",
                    recommended_format=f"首次使用时格式：全称 ({abbr})",
                    severity=SeverityLevel.MEDIUM
                ))

        return issues
    
    # 优化方法
    def _optimize_conciseness(self, text: str, target_style: WritingStyle, config: ProofreadingConfig) -> str:
        """Tighten the text by replacing redundant phrases and dropping weak qualifiers."""
        result = text

        # Swap wordy phrases for their concise equivalents.
        for wordy, short in self.redundant_phrases.items():
            result = re.sub(r'\b' + re.escape(wordy) + r'\b', short, result, flags=re.IGNORECASE)

        # Remove hedging qualifiers that add little meaning.
        for weak_word in ('very', 'quite', 'rather', 'somewhat', 'fairly'):
            result = re.sub(r'\b' + weak_word + r'\s+', '', result, flags=re.IGNORECASE)

        return result
    
    def _optimize_clarity(self, text: str, target_style: WritingStyle, config: ProofreadingConfig) -> str:
        """Improve clarity by splitting overly long sentences at a conjunction.

        Sentences longer than 25 words are cut once at the first conjunction
        found. Sentence-final periods are restored when the text is
        reassembled, since the sentence splitter strips them.

        Args:
            text: Text to optimize.
            target_style: Target writing style (unused in this basic pass).
            config: Proofreading configuration (unused in this basic pass).

        Returns:
            The optimized text.
        """
        sentences = self._split_sentences(text)
        improved_sentences = []

        # Conjunctions at which an overly long sentence may be split in two.
        conjunctions = [' and ', ' but ', ' however ', ' therefore ', ' consequently ']

        for sentence in sentences:
            sentence = sentence.strip()
            if not sentence:
                continue

            if len(sentence.split()) > 25:
                lowered = sentence.lower()
                for conj in conjunctions:
                    # Find the conjunction case-insensitively, then split the
                    # original-cased sentence by index. The previous version
                    # searched the lowercased copy but split the original text,
                    # silently missing capitalized conjunctions.
                    idx = lowered.find(conj)
                    if idx != -1:
                        first_part = sentence[:idx].strip()
                        second_part = sentence[idx + len(conj):].strip()
                        if first_part and second_part:
                            # Uppercase only the first character so the rest of
                            # the clause keeps its original casing (the old
                            # .capitalize() lowercased everything after it).
                            sentence = first_part + '. ' + second_part[0].upper() + second_part[1:]
                        break

            improved_sentences.append(sentence)

        # Rejoin with terminal punctuation restored; joining with spaces alone
        # (as before) produced text with no sentence-ending periods at all.
        optimized_text = '. '.join(improved_sentences)
        if optimized_text:
            optimized_text += '.'
        return optimized_text
    
    def _optimize_flow(self, text: str, target_style: WritingStyle, config: ProofreadingConfig) -> str:
        """Improve flow by periodically inserting transition words.

        Every third sentence that does not already contain a transition word
        is prefixed with one, chosen round-robin from a fixed list.

        Args:
            text: Text to optimize.
            target_style: Target writing style (unused in this basic pass).
            config: Proofreading configuration (unused in this basic pass).

        Returns:
            The optimized text.
        """
        optimized_text = text

        sentences = self._split_sentences(optimized_text)
        if len(sentences) > 1:
            improved_sentences = [sentences[0]]  # keep the opening sentence as-is

            transition_words = ['Furthermore', 'Moreover', 'However', 'Additionally', 'Consequently']

            for i, sentence in enumerate(sentences[1:], 1):
                sentence = sentence.strip()
                if not sentence:
                    continue

                # Insert a transition word on every third sentence that lacks one.
                if i % 3 == 0 and not any(tw.lower() in sentence.lower() for tw in transition_words):
                    transition = transition_words[i % len(transition_words)]
                    # Lowercase only the first character of the original sentence.
                    # The previous version lowercased the whole sentence, which
                    # destroyed proper nouns and acronyms inside it.
                    sentence = transition + ', ' + sentence[0].lower() + sentence[1:]

                improved_sentences.append(sentence)

            optimized_text = ' '.join(improved_sentences)

        return optimized_text
    
    def _optimize_tone(self, text: str, target_style: WritingStyle, config: ProofreadingConfig) -> str:
        """Shift the text toward the target tone (only academic is handled)."""
        result = text

        # Only the academic style has tone rewrites at the moment.
        if target_style != WritingStyle.ACADEMIC:
            return result

        # Replace informal vocabulary with formal equivalents.
        for casual, formal in self.informal_to_formal.items():
            result = re.sub(r'\b' + re.escape(casual) + r'\b', formal, result, flags=re.IGNORECASE)

        # Rephrase common first-person constructions into objective voice.
        objective_rewrites = {
            'I think': 'It appears',
            'I believe': 'It is suggested',
            'We found': 'The results indicate',
            'Our study shows': 'This study demonstrates'
        }
        for subjective, objective in objective_rewrites.items():
            result = re.sub(r'\b' + re.escape(subjective) + r'\b', objective, result, flags=re.IGNORECASE)

        return result
    
    def _optimize_consistency(self, text: str, target_style: WritingStyle, config: ProofreadingConfig) -> str:
        """Normalize formatting for consistency (currently: small numbers at line starts).

        Digits 1-10 appearing at the beginning of a line are replaced with
        their capitalized spelled-out form (e.g. ``"5 cats"`` -> ``"Five cats"``).

        Args:
            text: Text to optimize.
            target_style: Target writing style (unused in this basic pass).
            config: Proofreading configuration (unused in this basic pass).

        Returns:
            The optimized text.
        """
        optimized_text = text

        # Spelled-out forms for small numbers.
        number_words = {
            '1': 'one', '2': 'two', '3': 'three', '4': 'four', '5': 'five',
            '6': 'six', '7': 'seven', '8': 'eight', '9': 'nine', '10': 'ten'
        }

        # Replace only at the start of a line (^ with MULTILINE anchors per
        # line); mid-sentence digits are left alone to avoid changing
        # measurements, citations, and identifiers. (The previous version also
        # built a word-boundary pattern per digit that it never used.)
        for digit, word in number_words.items():
            optimized_text = re.sub(r'^' + digit + r'\b', word.capitalize(), optimized_text, flags=re.MULTILINE)

        return optimized_text
    
    # 辅助方法
    def _split_sentences(self, text: str) -> List[str]:
        """分割句子"""
        # 简化的句子分割
        sentences = re.split(r'[.!?]+', text)
        return [s.strip() for s in sentences if s.strip()]
    
    def _filter_errors(self, errors: List[ProofreadingError], config: ProofreadingConfig) -> List[ProofreadingError]:
        """Filter errors by the configured minimum severity and confidence.

        Args:
            errors: Candidate errors produced by the individual checks.
            config: Supplies ``min_severity`` and ``min_confidence`` thresholds.

        Returns:
            Errors at or above both thresholds.
        """
        # The severity ranking is loop-invariant; build it (and resolve the
        # configured minimum rank) once instead of once per error.
        severity_order = {
            SeverityLevel.LOW: 1,
            SeverityLevel.MEDIUM: 2,
            SeverityLevel.HIGH: 3,
            SeverityLevel.CRITICAL: 4
        }
        min_rank = severity_order.get(config.min_severity, 0)

        return [
            error for error in errors
            if severity_order.get(error.severity, 0) >= min_rank
            and error.confidence >= config.min_confidence
        ]
    
    def _apply_corrections(self, text: str, errors: List[ProofreadingError], config: ProofreadingConfig) -> str:
        """Apply high-confidence automatic corrections to the text."""
        corrected_text = text

        # Error types that may be fixed automatically, gated by config flags.
        auto_fix_enabled = {
            ErrorType.SPELLING: config.auto_fix_spelling,
            ErrorType.PUNCTUATION: config.auto_fix_punctuation,
            ErrorType.CAPITALIZATION: config.auto_fix_capitalization,
        }

        # Work from the end of the text backwards so earlier replacements do
        # not shift the positions of the ones still to be applied.
        for error in sorted(errors, key=lambda e: e.start_position, reverse=True):
            if not error.preferred_suggestion:
                continue
            if not auto_fix_enabled.get(error.error_type, False):
                continue
            if error.confidence < 0.8:
                continue

            corrected_text = (
                corrected_text[:error.start_position]
                + error.preferred_suggestion
                + corrected_text[error.end_position:]
            )

        return corrected_text
    
    def _count_errors_by_type(self, errors: List[ProofreadingError]) -> Dict[str, int]:
        """Tally errors grouped by their error-type value."""
        counts = {}
        for key in (error.error_type.value for error in errors):
            counts[key] = counts.get(key, 0) + 1
        return counts
    
    def _count_errors_by_severity(self, errors: List[ProofreadingError]) -> Dict[str, int]:
        """Tally errors grouped by their severity value."""
        counts = {}
        for key in (error.severity.value for error in errors):
            counts[key] = counts.get(key, 0) + 1
        return counts
    
    def _calculate_quality_score(self, text: str, config: ProofreadingConfig) -> float:
        """Heuristic quality score in [0, 1] from length, structure, and vocabulary."""
        if not text.strip():
            return 0.0

        score = 1.0

        # Penalize very short texts.
        if len(text.split()) < 10:
            score *= 0.5

        # Penalize extreme average sentence lengths.
        sentences = self._split_sentences(text)
        if sentences:
            mean_sentence_len = sum(len(s.split()) for s in sentences) / len(sentences)
            if mean_sentence_len > 30:
                score *= 0.9  # overly long sentences
            elif mean_sentence_len < 5:
                score *= 0.8  # overly short sentences

        # Reward vocabulary diversity (type/token ratio).
        lower_tokens = text.lower().split()
        if lower_tokens:
            diversity = len(set(lower_tokens)) / len(lower_tokens)
            score *= (0.5 + diversity * 0.5)

        # Reward academic vocabulary when the target style is academic.
        if config.target_style == WritingStyle.ACADEMIC and lower_tokens:
            academic_hits = sum(1 for token in lower_tokens if token in self.academic_vocabulary)
            score *= (0.8 + (academic_hits / len(lower_tokens)) * 0.2)

        return min(1.0, max(0.0, score))
    
    def get_proofreading_statistics(self, result: ProofreadingResult) -> Dict[str, Any]:
        """Assemble a structured summary of a proofreading run."""
        processing_info = {
            'processing_time_ms': result.processing_time_ms,
            'rules_applied': result.rules_applied,
            'total_errors_found': result.total_errors
        }
        text_statistics = {
            'original_word_count': result.original_word_count,
            'corrected_word_count': result.corrected_word_count,
            'word_count_change': result.corrected_word_count - result.original_word_count,
            'original_sentence_count': result.original_sentence_count,
            'corrected_sentence_count': result.corrected_sentence_count
        }
        quality_metrics = {
            'original_quality_score': result.original_quality_score,
            'improved_quality_score': result.improved_quality_score,
            'improvement_percentage': result.improvement_percentage
        }
        return {
            'processing_info': processing_info,
            'error_analysis': result.get_error_summary(),
            'improvement_analysis': result.get_improvement_summary(),
            'text_statistics': text_statistics,
            'quality_metrics': quality_metrics
        }
    
    def export_proofreading_report(self, result: ProofreadingResult) -> str:
        """Render a human-readable proofreading report as plain text."""
        lines = []
        add = lines.append

        # Header and processing summary.
        add("=== 自动校对报告 ===")
        add(f"处理时间: {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
        add(f"处理耗时: {result.processing_time_ms:.2f} ms")
        add("")

        # Text statistics.
        add("=== 文本统计 ===")
        add(f"原始字数: {result.original_word_count}")
        add(f"修正后字数: {result.corrected_word_count}")
        add(f"字数变化: {result.corrected_word_count - result.original_word_count:+d}")
        add(f"原始句数: {result.original_sentence_count}")
        add(f"修正后句数: {result.corrected_sentence_count}")
        add("")

        # Error statistics, overall and broken down.
        add("=== 错误统计 ===")
        add(f"总错误数: {result.total_errors}")

        if result.errors_by_type:
            add("\n按类型分布:")
            for error_type, count in result.errors_by_type.items():
                add(f"  {error_type}: {count}")

        if result.errors_by_severity:
            add("\n按严重程度分布:")
            for severity, count in result.errors_by_severity.items():
                add(f"  {severity}: {count}")

        # Quality assessment.
        add("")
        add("=== 质量评估 ===")
        add(f"原始质量评分: {result.original_quality_score:.2f}")
        add(f"改进后质量评分: {result.improved_quality_score:.2f}")
        add(f"质量提升: {result.improvement_percentage:.1f}%")

        # Up to five example errors with confidence >= 0.8.
        if result.errors:
            add("")
            add("=== 主要错误示例 ===")

            examples = [e for e in result.errors if e.confidence >= 0.8][:5]
            for index, error in enumerate(examples, 1):
                add(f"{index}. {error.error_type.value}: '{error.original_text}'")
                if error.preferred_suggestion:
                    add(f"   建议: '{error.preferred_suggestion}'")
                if error.explanation:
                    add(f"   说明: {error.explanation}")
                add("")

        return "\n".join(lines)