"""
关键词密度分析器
提取关键词、计算密度、检测堆砌问题
"""

import re
import math
from typing import Dict, List, Tuple, Optional, Any, Set
from dataclasses import dataclass
from collections import Counter, defaultdict
import logging

logger = logging.getLogger(__name__)


@dataclass
class Keyword:
    """A single extracted keyword with its usage statistics."""
    word: str                 # the keyword text
    frequency: int            # number of occurrences in the document
    density: float            # frequency / total token count (fraction, not percent)
    tfidf_score: float        # simplified TF-IDF relevance score
    positions: List[int]      # character offsets of occurrences (capped at 10)
    is_primary: bool          # True if selected as one of the top primary keywords


@dataclass
class KeywordDistribution:
    """Summary of how keywords are spread across the document."""
    total_words: int                 # total number of extracted tokens
    unique_keywords: int             # number of distinct keywords analyzed
    avg_distance: float              # mean gap between consecutive keyword positions
    clustering_score: float          # normalized spread; smaller = more clustered
    distribution_uniformity: float   # fraction of text segments containing a keyword


@dataclass
class KeywordIssue:
    """A detected keyword problem in the document."""
    issue_type: str  # overstuffing, insufficient, uneven_distribution, clustering, error
    keyword: str     # offending keyword, or "" for document-level issues
    description: str # human-readable explanation of the issue
    severity: str    # high, medium, low
    locations: List[int]  # character offsets where the issue occurs (may be empty)


@dataclass
class KeywordAnalysisResult:
    """Complete output of a keyword analysis run."""
    keywords: List[Keyword]                        # all extracted keywords (top 50 by TF-IDF)
    primary_keywords: List[Keyword]                # top 10 keywords, flagged is_primary
    distribution: KeywordDistribution              # spread statistics for top keywords
    optimal_density_range: Tuple[float, float]     # recommended (min, max) density
    issues: List[KeywordIssue]                     # detected problems
    suggestions: List[Dict[str, Any]]              # actionable optimization suggestions
    synonym_recommendations: List[Dict[str, Any]]  # synonym swaps for over-used keywords


class KeywordAnalyzer:
    """Keyword density analyzer.

    Tokenizes an article (character n-grams for Chinese, word tokens for
    English), scores candidates with a simplified TF-IDF, measures how
    keywords are distributed across the text, and reports issues
    (keyword stuffing, insufficient focus, uneven distribution) together
    with optimization suggestions and synonym recommendations.
    """
    
    def __init__(self):
        """Initialize density thresholds, stopword lists and the synonym table."""
        # Ideal keyword density range, as a fraction of total tokens.
        self.optimal_density_min = 0.01  # 1%
        self.optimal_density_max = 0.03  # 3%
        
        # Chinese stopwords (only applied to bigram candidates in _tokenize).
        self.chinese_stopwords = {
            '的', '了', '在', '是', '我', '有', '和', '就', '不', '人',
            '都', '一', '一个', '上', '也', '很', '到', '说', '要', '去',
            '你', '会', '着', '没有', '看', '好', '自己', '这', '那', '些',
            '他', '她', '它', '们', '么', '之', '与', '及', '其', '此',
            '但', '而', '或', '若', '虽', '然', '乃', '即', '便', '于',
            '如', '若', '所', '为', '以', '因', '由', '故', '从', '向',
            '把', '被', '让', '给', '对', '将', '令', '使', '用', '通过',
            '经过', '正在', '已经', '曾经', '开始', '结束', '可能', '应该'
        }
        
        # English stopwords (applied to all English tokens).
        self.english_stopwords = {
            'the', 'be', 'to', 'of', 'and', 'a', 'in', 'that', 'have',
            'i', 'it', 'for', 'not', 'on', 'with', 'he', 'as', 'you',
            'do', 'at', 'this', 'but', 'his', 'by', 'from', 'they',
            'we', 'say', 'her', 'she', 'or', 'an', 'will', 'my', 'one',
            'all', 'would', 'there', 'their', 'what', 'so', 'up', 'out',
            'if', 'about', 'who', 'get', 'which', 'go', 'me', 'when',
            'can', 'like', 'time', 'no', 'just', 'him', 'know', 'take'
        }
        
        # Simplified synonym table; each set is one interchangeable group.
        self.synonym_groups = [
            {'企业', '公司', '组织', '机构'},
            {'发展', '进步', '成长', '提升'},
            {'技术', '科技', '技能', '工艺'},
            {'数据', '信息', '资料', '数字'},
            {'分析', '解析', '剖析', '研究'},
            {'管理', '治理', '监管', '控制'},
            {'创新', '革新', '创造', '突破'},
            {'优化', '改进', '提升', '改善'},
            {'实施', '执行', '推行', '落实'},
            {'评估', '评价', '考核', '测评'}
        ]
        
        # Simulated document frequencies for the IDF term. All lookups go
        # through .get() with an explicit default (see _calculate_tfidf),
        # so a plain dict suffices — the previous defaultdict(lambda: 1)
        # factory was never triggered by .get() and was misleading.
        self.doc_frequency: Dict[str, int] = {}
        self.total_docs = 1000  # assumed corpus size for IDF
    
    def analyze(self, content: str) -> KeywordAnalysisResult:
        """
        Run the full keyword analysis pipeline on an article.
        
        Args:
            content: article text (may mix Chinese and English)
            
        Returns:
            KeywordAnalysisResult with keywords, distribution statistics,
            detected issues, suggestions and synonym recommendations.
            An empty/whitespace-only input yields an error-marked result.
        """
        if not content or not content.strip():
            return self._empty_result("内容为空")
        
        # Tokenize and preprocess.
        words = self._tokenize(content)
        if not words:
            return self._empty_result("无法提取词汇")
        
        # Term frequencies.
        word_freq = Counter(words)
        
        # TF-IDF relevance scores.
        tfidf_scores = self._calculate_tfidf(words, word_freq)
        
        # Top keywords by TF-IDF.
        keywords = self._extract_keywords(word_freq, tfidf_scores, content)
        
        # Flag the primary (top-10) keywords.
        primary_keywords = self._identify_primary_keywords(keywords)
        
        # Positional distribution statistics.
        distribution = self._analyze_distribution(keywords, len(words))
        
        # Problem detection.
        issues = self._detect_issues(keywords, distribution)
        
        # Optimization suggestions.
        suggestions = self._generate_suggestions(keywords, distribution, issues)
        
        # Synonym recommendations for over-used keywords.
        synonym_recommendations = self._recommend_synonyms(keywords)
        
        return KeywordAnalysisResult(
            keywords=keywords,
            primary_keywords=primary_keywords,
            distribution=distribution,
            optimal_density_range=(self.optimal_density_min, self.optimal_density_max),
            issues=issues,
            suggestions=suggestions,
            synonym_recommendations=synonym_recommendations
        )
    
    def _tokenize(self, content: str) -> List[str]:
        """Tokenize mixed Chinese/English text into candidate keywords.

        Chinese runs produce overlapping 2/3/4-character n-grams (a crude
        stand-in for a real segmenter such as jieba); English tokens are
        lowercased words longer than 2 characters, minus stopwords.
        """
        words = []
        
        # Extract runs of Chinese characters.
        chinese_pattern = r'[\u4e00-\u9fff]+'
        chinese_words = re.findall(chinese_pattern, content)
        
        # Naive Chinese "segmentation": sliding 2-4 character n-grams.
        for text in chinese_words:
            # Bigrams (stopword-filtered).
            for i in range(len(text) - 1):
                bigram = text[i:i+2]
                if bigram not in self.chinese_stopwords:
                    words.append(bigram)
            
            # Trigrams.
            for i in range(len(text) - 2):
                trigram = text[i:i+3]
                words.append(trigram)
            
            # 4-grams (idioms etc.).
            for i in range(len(text) - 3):
                fourgram = text[i:i+4]
                words.append(fourgram)
        
        # Extract English words (case-insensitive).
        english_pattern = r'\b[a-zA-Z]+\b'
        english_words = re.findall(english_pattern, content.lower())
        
        # Drop English stopwords and very short tokens.
        english_words = [w for w in english_words 
                        if w not in self.english_stopwords and len(w) > 2]
        words.extend(english_words)
        
        return words
    
    def _calculate_tfidf(self, words: List[str], word_freq: Counter) -> Dict[str, float]:
        """Compute a simplified TF-IDF score for every distinct token.

        IDF uses simulated document frequencies (default 10 occurrences in
        an assumed corpus of self.total_docs documents).
        """
        tfidf_scores = {}
        total_words = len(words)
        
        for word, freq in word_freq.items():
            # Term frequency.
            tf = freq / total_words
            
            # Inverse document frequency; +1 in the denominator guards
            # against division by zero for unseen words.
            doc_freq = self.doc_frequency.get(word, 10)
            idf = math.log(self.total_docs / (1 + doc_freq))
            
            tfidf_scores[word] = tf * idf
        
        return tfidf_scores
    
    def _extract_keywords(self, word_freq: Counter, 
                         tfidf_scores: Dict[str, float],
                         content: str) -> List[Keyword]:
        """Build Keyword records for the top 50 tokens by TF-IDF score."""
        keywords = []
        total_words = sum(word_freq.values())
        
        # Take the 50 highest-scoring tokens.
        sorted_words = sorted(tfidf_scores.items(), 
                            key=lambda x: x[1], reverse=True)[:50]
        
        for word, tfidf in sorted_words:
            freq = word_freq[word]
            density = freq / total_words
            
            # Character offsets of the word in the original text.
            positions = self._find_word_positions(word, content)
            
            keywords.append(Keyword(
                word=word,
                frequency=freq,
                density=density,
                tfidf_score=tfidf,
                positions=positions,
                is_primary=False  # set later by _identify_primary_keywords
            ))
        
        return keywords
    
    def _find_word_positions(self, word: str, content: str) -> List[int]:
        """Return up to 10 character offsets where the word occurs."""
        positions = []
        pattern = re.compile(re.escape(word), re.IGNORECASE)
        
        for match in pattern.finditer(content):
            positions.append(match.start())
        
        return positions[:10]  # cap to keep the payload small
    
    def _identify_primary_keywords(self, keywords: List[Keyword]) -> List[Keyword]:
        """Flag and return the top 10 keywords by TF-IDF as primary.

        Note: mutates the is_primary field of the shared Keyword objects.
        """
        if not keywords:
            return []
        
        # Rank by TF-IDF score.
        sorted_keywords = sorted(keywords, 
                               key=lambda k: k.tfidf_score, reverse=True)
        
        # Top 10 become the primary keywords.
        primary_keywords = sorted_keywords[:10]
        
        for kw in primary_keywords:
            kw.is_primary = True
        
        return primary_keywords
    
    def _analyze_distribution(self, keywords: List[Keyword], 
                            total_words: int) -> KeywordDistribution:
        """Compute positional distribution statistics for the top keywords.

        NOTE(review): keyword positions are character offsets while
        total_words is a token count; _calculate_uniformity clamps
        out-of-range segment indices to compensate — confirm whether the
        caller should pass len(content) instead.
        """
        if not keywords:
            return KeywordDistribution(
                total_words=total_words,
                unique_keywords=0,
                avg_distance=0,
                clustering_score=0,
                distribution_uniformity=0
            )
        
        # Pool the positions of the top 10 keywords.
        all_positions = []
        for kw in keywords[:10]:
            all_positions.extend(kw.positions)
        
        all_positions.sort()
        
        # Mean gap between consecutive occurrences.
        avg_distance = 0
        if len(all_positions) > 1:
            distances = [all_positions[i+1] - all_positions[i] 
                        for i in range(len(all_positions)-1)]
            avg_distance = sum(distances) / len(distances)
        
        # Clustering score (smaller = more clustered).
        clustering_score = self._calculate_clustering(all_positions)
        
        # Uniformity of coverage across text segments.
        distribution_uniformity = self._calculate_uniformity(all_positions, total_words)
        
        return KeywordDistribution(
            total_words=total_words,
            unique_keywords=len(keywords),
            avg_distance=avg_distance,
            clustering_score=clustering_score,
            distribution_uniformity=distribution_uniformity
        )
    
    def _calculate_clustering(self, positions: List[int]) -> float:
        """Return a normalized spread measure in [0, 1]; smaller = more clustered."""
        if len(positions) < 2:
            return 0
        
        # Standard deviation of the positions.
        mean_pos = sum(positions) / len(positions)
        variance = sum((p - mean_pos) ** 2 for p in positions) / len(positions)
        std_dev = math.sqrt(variance)
        
        # Normalize by the mean position as a rough upper bound on the
        # standard deviation (heuristic, not a tight bound).
        max_possible_std = mean_pos
        if max_possible_std > 0:
            return min(1.0, std_dev / max_possible_std)
        
        return 0
    
    def _calculate_uniformity(self, positions: List[int], total_length: int) -> float:
        """Return the fraction of text segments (out of 10) containing a keyword."""
        if not positions or total_length <= 0:
            return 0
        
        # Split the text into fixed segments and mark which contain keywords.
        num_segments = 10
        # Bug fix: total_length < num_segments previously made segment_size
        # zero and raised ZeroDivisionError below.
        segment_size = max(1, total_length // num_segments)
        segments_with_keywords = set()
        
        for pos in positions:
            # Bug fix: clamp the index — positions can exceed total_length
            # (char offsets vs. token count), which previously produced
            # segment indices >= num_segments and inflated the score.
            segment = min(pos // segment_size, num_segments - 1)
            segments_with_keywords.add(segment)
        
        # Uniformity = covered segments / total segments.
        return len(segments_with_keywords) / num_segments
    
    def _detect_issues(self, keywords: List[Keyword],
                      distribution: KeywordDistribution) -> List[KeywordIssue]:
        """Detect stuffing, insufficient focus, uneven distribution and clustering."""
        issues = []
        
        # Keyword stuffing: density well above the optimal maximum.
        for kw in keywords:
            if kw.density > self.optimal_density_max * 1.5:
                issues.append(KeywordIssue(
                    issue_type="overstuffing",
                    keyword=kw.word,
                    description=f"关键词'{kw.word}'密度过高（{kw.density:.2%}）",
                    severity="high",
                    locations=kw.positions[:5]
                ))
        
        # Too few primary keywords → weak topical focus.
        primary_keywords = [kw for kw in keywords if kw.is_primary]
        if len(primary_keywords) < 3:
            issues.append(KeywordIssue(
                issue_type="insufficient",
                keyword="",
                description="主要关键词数量不足，文章主题不够突出",
                severity="medium",
                locations=[]
            ))
        
        # Uneven distribution: keywords concentrated in few segments.
        if distribution.distribution_uniformity < 0.4:
            issues.append(KeywordIssue(
                issue_type="uneven_distribution",
                keyword="",
                description="关键词分布不均匀，集中在部分段落",
                severity="medium",
                locations=[]
            ))
        
        # Excessive clustering (low spread score).
        if distribution.clustering_score < 0.3:
            issues.append(KeywordIssue(
                issue_type="clustering",
                keyword="",
                description="关键词过度聚集，建议分散到全文",
                severity="low",
                locations=[]
            ))
        
        return issues
    
    def _generate_suggestions(self, keywords: List[Keyword],
                            distribution: KeywordDistribution,
                            issues: List[KeywordIssue]) -> List[Dict[str, Any]]:
        """Turn detected issues and density statistics into actionable suggestions."""
        suggestions = []
        
        # Suggestions derived from detected issues.
        overstuffing_issues = [i for i in issues if i.issue_type == "overstuffing"]
        if overstuffing_issues:
            keywords_list = [i.keyword for i in overstuffing_issues[:3]]
            suggestions.append({
                "type": "keyword",
                "priority": "high",
                "description": "减少高频关键词使用",
                "detail": f"以下关键词密度过高：{', '.join(keywords_list)}",
                "action": "使用同义词替换或删减重复内容",
                "impact": 0.2
            })
        
        if any(i.issue_type == "insufficient" for i in issues):
            suggestions.append({
                "type": "keyword",
                "priority": "medium",
                "description": "增加核心关键词",
                "detail": "文章缺少明确的核心关键词，主题不够突出",
                "action": "确定2-3个核心关键词并适当增加使用频率",
                "examples": self._suggest_core_keywords(keywords),
                "impact": 0.15
            })
        
        if distribution.distribution_uniformity < 0.5:
            suggestions.append({
                "type": "keyword",
                "priority": "medium",
                "description": "改善关键词分布",
                "detail": "关键词分布不均匀，建议在全文均匀分布",
                "action": "在缺少关键词的段落中适当添加相关内容",
                "impact": 0.1
            })
        
        # Density suggestion based on the top keywords.
        # Bug fix: previously always divided by 5, skewing the average
        # whenever fewer than 5 keywords were extracted.
        top_keywords = keywords[:5]
        avg_density = (sum(kw.density for kw in top_keywords) / len(top_keywords)
                       if top_keywords else 0)
        if avg_density < self.optimal_density_min:
            suggestions.append({
                "type": "keyword",
                "priority": "low",
                "description": "适当增加关键词密度",
                "detail": f"当前平均密度{avg_density:.2%}，建议提升到{self.optimal_density_min:.1%}-{self.optimal_density_max:.1%}",
                "impact": 0.1
            })
        
        return suggestions
    
    def _suggest_core_keywords(self, keywords: List[Keyword]) -> List[str]:
        """Return up to 5 candidate core keywords, ranked by TF-IDF."""
        if not keywords:
            return []
        
        # Highest-scoring words make the best core-keyword candidates.
        sorted_keywords = sorted(keywords, 
                               key=lambda k: k.tfidf_score, reverse=True)
        
        return [kw.word for kw in sorted_keywords[:5]]
    
    def _recommend_synonyms(self, keywords: List[Keyword]) -> List[Dict[str, Any]]:
        """Recommend synonym replacements for over-dense keywords (max 5)."""
        recommendations = []
        
        # Only keywords above the optimal density ceiling need replacement.
        high_freq_keywords = [kw for kw in keywords if kw.density > self.optimal_density_max]
        
        for kw in high_freq_keywords[:5]:
            synonyms = self._find_synonyms(kw.word)
            if synonyms:
                recommendations.append({
                    "keyword": kw.word,
                    "frequency": kw.frequency,
                    "density": kw.density,
                    "synonyms": list(synonyms),
                    "suggestion": f"可以使用'{', '.join(list(synonyms)[:3])}'等词替换部分'{kw.word}'"
                })
        
        return recommendations
    
    def _find_synonyms(self, word: str) -> Set[str]:
        """Return the other members of the word's synonym group, or an empty set."""
        for group in self.synonym_groups:
            if word in group:
                return group - {word}
        
        # Word is not in any known group.
        return set()
    
    def _empty_result(self, reason: str) -> KeywordAnalysisResult:
        """Build an error-marked result for unusable input (reason is user-facing)."""
        return KeywordAnalysisResult(
            keywords=[],
            primary_keywords=[],
            distribution=KeywordDistribution(0, 0, 0, 0, 0),
            optimal_density_range=(self.optimal_density_min, self.optimal_density_max),
            issues=[KeywordIssue("error", "", reason, "high", [])],
            suggestions=[],
            synonym_recommendations=[]
        )