"""
段落连贯性检查器
分析段落间的转承关系和语义连贯性
"""

import re
import numpy as np
from typing import Dict, List, Tuple, Optional, Any
from dataclasses import dataclass
from enum import Enum
import logging
from collections import Counter

logger = logging.getLogger(__name__)


class TransitionType(Enum):
    """Enumeration of inter-paragraph transition relation types."""
    SEQUENCE = "sequence"        # sequential / ordering relation
    CONTRAST = "contrast"        # contrastive relation
    CAUSE_EFFECT = "cause_effect"  # cause-and-effect relation
    EXAMPLE = "example"          # exemplification relation
    SUMMARY = "summary"          # summarizing relation
    ADDITION = "addition"        # additive / parallel relation
    NONE = "none"               # no obvious relation detected


@dataclass
class ParagraphRelation:
    """Relation between two adjacent paragraphs."""
    from_index: int        # 0-based index of the earlier paragraph
    to_index: int          # 0-based index of the following paragraph
    transition_type: TransitionType  # detected transition relation
    transition_words: List[str]      # transition/reference words found at the start of the following paragraph
    semantic_similarity: float       # lexical-overlap similarity in [0, 1]
    coherence_score: float           # combined coherence score in [0, 1]

@dataclass
class CoherenceIssue:
    """A single detected coherence problem."""
    paragraph_index: int  # 0-based paragraph index the issue refers to
    issue_type: str       # machine-readable kind, e.g. "missing_transition", "semantic_gap"
    description: str      # human-readable description (Chinese)
    severity: str  # high, medium, low

@dataclass
class CoherenceAnalysisResult:
    """Aggregate result of a coherence analysis run."""
    overall_score: float                          # weighted blend of the three sub-scores, in [0, 1]
    paragraph_relations: List[ParagraphRelation]  # one relation per adjacent paragraph pair
    transition_score: float                       # transition-word usage score, in [0, 1]
    semantic_score: float                         # semantic continuity score, in [0, 1]
    topic_consistency_score: float                # keyword-overlap topic score, in [0, 1]
    issues: List[CoherenceIssue]                  # detected problems
    suggestions: List[Dict[str, Any]]             # improvement suggestions as plain dicts

class CoherenceChecker:
    """Paragraph coherence checker.

    Analyses transition relations and semantic coherence between
    consecutive paragraphs of an article (primarily Chinese text) and
    produces an overall score, per-pair relations, detected issues and
    improvement suggestions.
    """

    def __init__(self):
        """Initialize the transition-word lexicons used by the checker."""
        # Chinese transition-word lexicon keyed by relation type. Dict
        # insertion order doubles as match priority in _identify_transition.
        # NOTE(review): entries containing a literal "..." (e.g. "拿...来说")
        # only match text that itself contains an ellipsis; kept as-is to
        # preserve the lexicon, but they are effectively inert.
        self.transition_words = {
            TransitionType.SEQUENCE: [
                "首先", "其次", "然后", "接着", "随后", "最后", "第一", "第二", "第三",
                "起初", "开始", "接下来", "紧接着", "进而", "继而"
            ],
            TransitionType.CONTRAST: [
                "但是", "然而", "可是", "不过", "反之", "相反", "另一方面", "与此相反",
                "尽管", "虽然", "即使", "反而", "却", "对比之下"
            ],
            TransitionType.CAUSE_EFFECT: [
                "因此", "所以", "因为", "由于", "故", "既然", "以至于", "导致",
                "结果", "造成", "引起", "使得", "从而", "以致"
            ],
            TransitionType.EXAMPLE: [
                "例如", "比如", "举例来说", "譬如", "好比", "正如", "如同",
                "比方说", "具体来说", "拿...来说", "以...为例"
            ],
            TransitionType.SUMMARY: [
                "总之", "综上所述", "总的来说", "概括地说", "简言之", "一言以蔽之",
                "归纳起来", "总结", "概括", "总而言之", "由此可见"
            ],
            TransitionType.ADDITION: [
                "而且", "并且", "另外", "此外", "同时", "加之", "再者", "不仅",
                "不但", "还", "又", "也", "以及", "同样", "除此之外"
            ]
        }

        # Phrases that signal an explicit topic shift.
        # NOTE(review): currently not consulted by any check below.
        self.topic_shift_indicators = [
            "另一个", "转向", "现在让我们", "接下来讨论", "下面我们看",
            "换个角度", "从另一个方面", "值得注意的是", "需要强调的是"
        ]

        # Anaphoric / reference words; treated as a weak continuation signal
        # when no explicit transition word is present.
        self.reference_words = [
            "这", "那", "此", "该", "这些", "那些", "它", "他们", "我们",
            "上述", "前述", "以上", "下面", "如下", "前面提到的", "后面将要"
        ]

    def check(self, content: str) -> CoherenceAnalysisResult:
        """
        Check the coherence of an article.

        Args:
            content: article text; paragraphs separated by blank lines

        Returns:
            A CoherenceAnalysisResult with scores, issues and suggestions.
            Empty/one-paragraph input yields an error result (score 0).
        """
        if not content or not content.strip():
            return self._empty_result("内容为空")

        # Split into paragraphs; at least two are needed to analyse relations.
        paragraphs = self._split_paragraphs(content)
        if len(paragraphs) < 2:
            return self._empty_result("段落数量不足")

        # Analyse each adjacent paragraph pair.
        paragraph_relations = self._analyze_paragraph_relations(paragraphs)

        # Sub-scores.
        transition_score = self._calculate_transition_score(paragraph_relations)
        semantic_score = self._calculate_semantic_score(paragraph_relations)
        topic_consistency_score = self._calculate_topic_consistency(paragraphs)

        # Weighted overall score (weights sum to 1.0).
        overall_score = (
            transition_score * 0.3 +
            semantic_score * 0.4 +
            topic_consistency_score * 0.3
        )

        # Detect concrete problems.
        issues = self._detect_coherence_issues(paragraphs, paragraph_relations)

        # Produce actionable suggestions.
        suggestions = self._generate_suggestions(
            paragraphs, paragraph_relations, issues
        )

        return CoherenceAnalysisResult(
            overall_score=overall_score,
            paragraph_relations=paragraph_relations,
            transition_score=transition_score,
            semantic_score=semantic_score,
            topic_consistency_score=topic_consistency_score,
            issues=issues,
            suggestions=suggestions
        )

    def _split_paragraphs(self, content: str) -> List[str]:
        """Split content into non-empty, stripped paragraphs.

        Splits on blank lines via regex so that Windows line endings
        (\r\n\r\n) and whitespace-only separator lines are handled in
        addition to a plain '\n\n'.
        """
        paragraphs = re.split(r'\n\s*\n', content)
        return [p.strip() for p in paragraphs if p.strip()]

    def _analyze_paragraph_relations(self, paragraphs: List[str]) -> List[ParagraphRelation]:
        """Build a ParagraphRelation for every adjacent paragraph pair."""
        relations = []

        for i in range(len(paragraphs) - 1):
            current = paragraphs[i]
            next_para = paragraphs[i + 1]

            # Transition type and the words that signalled it.
            transition_type, transition_words = self._identify_transition(
                current, next_para
            )

            # Lexical-overlap similarity between the two paragraphs.
            semantic_similarity = self._calculate_semantic_similarity(
                current, next_para
            )

            # Combined coherence score for this pair.
            coherence_score = self._calculate_coherence_score(
                transition_type, transition_words, semantic_similarity
            )

            relations.append(ParagraphRelation(
                from_index=i,
                to_index=i + 1,
                transition_type=transition_type,
                transition_words=transition_words,
                semantic_similarity=semantic_similarity,
                coherence_score=coherence_score
            ))

        return relations

    def _identify_transition(self, current: str, next_para: str) -> Tuple[TransitionType, List[str]]:
        """Identify the transition type and transition words between two paragraphs.

        Only the opening of the next paragraph (first 50 characters) is
        inspected, since transitions conventionally appear there. The first
        transition type (in lexicon order) with any match wins; ALL of that
        type's matching words are collected — previously only a single word
        could ever be returned, which made the multi-word bonus in
        _calculate_coherence_score unreachable.
        """
        found_words: List[str] = []
        found_type = TransitionType.NONE

        next_start = next_para[:50]

        for trans_type, words in self.transition_words.items():
            matches = [word for word in words if word in next_start]
            if matches:
                found_type = trans_type
                found_words = matches
                break

        # No explicit transition word: fall back to reference words, which
        # usually indicate a continuation of the previous paragraph.
        if found_type == TransitionType.NONE:
            for ref_word in self.reference_words:
                if ref_word in next_start:
                    found_words.append(ref_word)
                    found_type = TransitionType.SEQUENCE
                    break

        return found_type, found_words

    def _calculate_semantic_similarity(self, text1: str, text2: str) -> float:
        """
        Compute a lexical proxy for semantic similarity.

        Simplified approach: a weighted blend of vocabulary-set Jaccard
        similarity and term-frequency cosine similarity.
        """
        words1 = self._extract_words(text1)
        words2 = self._extract_words(text2)

        if not words1 or not words2:
            return 0.0

        # Jaccard similarity over the vocabulary sets.
        intersection = len(words1 & words2)
        union = len(words1 | words2)

        if union == 0:
            return 0.0

        jaccard = intersection / union

        # Frequency-aware cosine similarity.
        cosine_sim = self._calculate_cosine_similarity(text1, text2)

        # Blend the two measures (cosine weighted higher).
        return jaccard * 0.4 + cosine_sim * 0.6

    def _extract_words(self, text: str) -> set:
        """Extract a vocabulary set from text.

        Chinese text is tokenized as maximal runs of CJK characters (no
        word segmentation); English is tokenized as lowercased words.
        """
        # Maximal CJK runs (the original docstring claimed per-character
        # extraction, but the regex has always matched whole runs).
        chinese_chars = set(re.findall(r'[\u4e00-\u9fff]+', text))

        # Lowercased English words.
        english_words = set(re.findall(r'\b[a-zA-Z]+\b', text.lower()))

        return chinese_chars | english_words

    def _calculate_cosine_similarity(self, text1: str, text2: str) -> float:
        """Compute term-frequency cosine similarity between two texts."""
        # Tokenize: CJK runs and lowercased English words.
        words1 = re.findall(r'[\u4e00-\u9fff]+|\b[a-zA-Z]+\b', text1.lower())
        words2 = re.findall(r'[\u4e00-\u9fff]+|\b[a-zA-Z]+\b', text2.lower())

        if not words1 or not words2:
            return 0.0

        # Term frequencies.
        freq1 = Counter(words1)
        freq2 = Counter(words2)

        # Shared vocabulary axis for both vectors.
        all_words = set(freq1.keys()) | set(freq2.keys())

        vec1 = np.array([freq1.get(word, 0) for word in all_words])
        vec2 = np.array([freq2.get(word, 0) for word in all_words])

        dot_product = np.dot(vec1, vec2)
        norm1 = np.linalg.norm(vec1)
        norm2 = np.linalg.norm(vec2)

        if norm1 == 0 or norm2 == 0:
            return 0.0

        # Cast so callers receive a built-in float, not numpy.float64.
        return float(dot_product / (norm1 * norm2))

    def _calculate_coherence_score(self, transition_type: TransitionType,
                                  transition_words: List[str],
                                  semantic_similarity: float) -> float:
        """Combine transition evidence and similarity into one score in [0, 1]."""
        score = 0.0

        # Transition evidence: 0.4 for any recognised transition, plus up
        # to 0.2 for multiple transition words.
        if transition_type != TransitionType.NONE:
            score += 0.4
        if len(transition_words) > 0:
            score += 0.1 * min(len(transition_words), 2)  # at most +0.2

        # Semantic similarity contributes the remaining 0.4.
        score += semantic_similarity * 0.4

        # Clamp to [0, 1].
        return min(1.0, score)

    def _calculate_transition_score(self, relations: List[ParagraphRelation]) -> float:
        """Score transition-word usage across all paragraph pairs."""
        if not relations:
            return 0.0

        # Count pairs that have any recognised transition.
        with_transition = sum(
            1 for r in relations
            if r.transition_type != TransitionType.NONE
        )

        # Base: fraction of pairs with a transition.
        base_score = with_transition / len(relations)

        # Bonus (capped at 0.2) for using diverse transition types.
        transition_types = set(
            r.transition_type for r in relations
            if r.transition_type != TransitionType.NONE
        )
        diversity_bonus = min(0.2, len(transition_types) * 0.05)

        return min(1.0, base_score + diversity_bonus)

    def _calculate_semantic_score(self, relations: List[ParagraphRelation]) -> float:
        """Score semantic continuity as mean similarity minus a gap penalty."""
        if not relations:
            return 0.0

        similarities = [r.semantic_similarity for r in relations]
        avg_similarity = sum(similarities) / len(similarities)

        # Penalise very low similarities (likely abrupt topic changes).
        low_similarity_count = sum(1 for s in similarities if s < 0.2)
        penalty = low_similarity_count * 0.1

        return max(0.0, avg_similarity - penalty)

    def _calculate_topic_consistency(self, paragraphs: List[str]) -> float:
        """Score topic consistency via pairwise keyword overlap across paragraphs."""
        if len(paragraphs) < 2:
            return 1.0

        # Keyword sets per paragraph.
        all_keywords = []
        for para in paragraphs:
            keywords = self._extract_keywords(para)
            all_keywords.append(keywords)

        # Average Jaccard overlap over all paragraph pairs (O(n^2), fine
        # for article-sized inputs).
        total_consistency = 0.0
        comparisons = 0

        for i in range(len(all_keywords) - 1):
            for j in range(i + 1, len(all_keywords)):
                if all_keywords[i] and all_keywords[j]:
                    overlap = len(all_keywords[i] & all_keywords[j])
                    union = len(all_keywords[i] | all_keywords[j])
                    if union > 0:
                        total_consistency += overlap / union
                        comparisons += 1

        # No comparable pairs: return a neutral score.
        if comparisons == 0:
            return 0.5

        return total_consistency / comparisons

    def _extract_keywords(self, text: str, top_n: int = 10) -> set:
        """Extract the top-N frequent non-stopword tokens (simplified keyword extraction)."""
        words = re.findall(r'[\u4e00-\u9fff]+|\b[a-zA-Z]+\b', text.lower())

        # Minimal stopword filter; single-character tokens are dropped too.
        stopwords = {'的', '是', '在', '和', '了', '有', '我', '你', '他',
                    'the', 'is', 'are', 'was', 'were', 'a', 'an', 'and'}
        words = [w for w in words if w not in stopwords and len(w) > 1]

        word_freq = Counter(words)

        top_words = set(word for word, _ in word_freq.most_common(top_n))
        return top_words

    def _detect_coherence_issues(self, paragraphs: List[str],
                                relations: List[ParagraphRelation]) -> List[CoherenceIssue]:
        """Detect coherence problems: missing transitions, semantic gaps, length outliers."""
        issues = []

        # Missing transition with a noticeable semantic jump.
        for i, relation in enumerate(relations):
            if relation.transition_type == TransitionType.NONE:
                if relation.semantic_similarity < 0.3:
                    issues.append(CoherenceIssue(
                        paragraph_index=i + 1,
                        issue_type="missing_transition",
                        description=f"第{i+2}段与前文缺少过渡，语义跳跃较大",
                        severity="medium"
                    ))

        # Severe semantic break (may overlap with missing_transition above
        # when similarity < 0.15 — both issues are intentionally reported).
        for i, relation in enumerate(relations):
            if relation.semantic_similarity < 0.15:
                issues.append(CoherenceIssue(
                    paragraph_index=i + 1,
                    issue_type="semantic_gap",
                    description=f"第{i+2}段与前文语义关联度过低，可能存在话题突变",
                    severity="high"
                ))

        # Paragraph length outliers relative to the mean.
        lengths = [len(p) for p in paragraphs]
        avg_length = sum(lengths) / len(lengths)

        for i, length in enumerate(lengths):
            if length < avg_length * 0.3:
                issues.append(CoherenceIssue(
                    paragraph_index=i,
                    issue_type="paragraph_too_short",
                    description=f"第{i+1}段过短，可能需要扩充内容",
                    severity="low"
                ))
            elif length > avg_length * 2.5:
                issues.append(CoherenceIssue(
                    paragraph_index=i,
                    issue_type="paragraph_too_long",
                    description=f"第{i+1}段过长，建议拆分为多个段落",
                    severity="low"
                ))

        return issues

    def _generate_suggestions(self, paragraphs: List[str],
                            relations: List[ParagraphRelation],
                            issues: List[CoherenceIssue]) -> List[Dict[str, Any]]:
        """Turn detected issues (plus global statistics) into improvement suggestions."""
        suggestions = []

        # Per-issue suggestions.
        for issue in issues:
            if issue.issue_type == "missing_transition":
                suggestions.append({
                    "type": "coherence",
                    "priority": "medium",
                    "location": f"第{issue.paragraph_index + 1}段开头",
                    "description": "添加过渡词或过渡句",
                    "detail": "建议在段落开头添加过渡词，建立与前文的联系",
                    "examples": [
                        "此外，还需要考虑...",
                        "与此同时，我们也要注意...",
                        "基于上述分析，可以看出..."
                    ],
                    "impact": 0.15
                })

            elif issue.issue_type == "semantic_gap":
                suggestions.append({
                    "type": "coherence",
                    "priority": "high",
                    "location": f"第{issue.paragraph_index}段与第{issue.paragraph_index + 1}段之间",
                    "description": "增加过渡段落或调整段落顺序",
                    "detail": "两段内容跳跃较大，建议增加过渡内容或重新组织段落顺序",
                    "impact": 0.25
                })

            elif issue.issue_type == "paragraph_too_short":
                # Previously detected but silently ignored; now mirrored by a
                # suggestion, consistent with paragraph_too_long handling.
                suggestions.append({
                    "type": "structure",
                    "priority": "low",
                    "location": f"第{issue.paragraph_index + 1}段",
                    "description": "扩充过短段落",
                    "detail": "段落过短可能论述不充分，建议补充论据或与相邻段落合并",
                    "impact": 0.1
                })

            elif issue.issue_type == "paragraph_too_long":
                suggestions.append({
                    "type": "structure",
                    "priority": "low",
                    "location": f"第{issue.paragraph_index + 1}段",
                    "description": "拆分长段落",
                    "detail": "段落过长影响阅读体验，建议按照不同的观点或论述拆分",
                    "impact": 0.1
                })

        # Global suggestion when overall coherence is weak.
        avg_coherence = sum(r.coherence_score for r in relations) / len(relations) if relations else 0

        if avg_coherence < 0.5:
            suggestions.append({
                "type": "coherence",
                "priority": "high",
                "location": "全文",
                "description": "增强段落间的逻辑联系",
                "detail": "整体连贯性较弱，建议：1）添加过渡词句；2）确保前后段落有明确的逻辑关系；3）使用指代词建立联系",
                "impact": 0.3
            })

        # Suggest diversifying transition types when fewer than 3 are used.
        transition_types = set(
            r.transition_type for r in relations
            if r.transition_type != TransitionType.NONE
        )

        if len(transition_types) < 3:
            suggestions.append({
                "type": "coherence",
                "priority": "low",
                "location": "全文",
                "description": "丰富过渡词类型",
                "detail": "过渡词类型较单一，建议使用多样化的过渡词，如因果、对比、递进等关系",
                "examples": self._get_transition_examples(),
                "impact": 0.1
            })

        return suggestions

    def _get_transition_examples(self) -> List[str]:
        """Return up to 10 example transition words, two per transition type."""
        examples = []
        for trans_type, words in self.transition_words.items():
            if words:
                examples.extend(words[:2])  # two examples per type
        return examples[:10]  # cap at 10 examples

    def _empty_result(self, reason: str) -> CoherenceAnalysisResult:
        """Return a zero-score result carrying a single error issue with *reason*."""
        return CoherenceAnalysisResult(
            overall_score=0.0,
            paragraph_relations=[],
            transition_score=0.0,
            semantic_score=0.0,
            topic_consistency_score=0.0,
            issues=[CoherenceIssue(
                paragraph_index=0,
                issue_type="error",
                description=reason,
                severity="high"
            )],
            suggestions=[]
        )