"""
关键词提取模块

提供查询关键词的提取、过滤和评分功能。
"""

import logging
import re
from collections import Counter
from typing import Any, Dict, List, Optional, Set, Tuple


class KeywordExtractor:
    """
    关键词提取器

    提供多种关键词提取策略，支持中文分词和TF-IDF等算法。
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """
        初始化关键词提取器

        Args:
            config: 提取配置
        """
        self.config = config or {}
        self.stopwords = self._load_stopwords()
        self.min_word_length = self.config.get('min_word_length', 2)
        self.max_keywords = self.config.get('max_keywords', 10)

        # 尝试导入jieba
        self.use_jieba = self._try_import_jieba()

    def _try_import_jieba(self) -> bool:
        """尝试导入jieba分词库"""
        try:
            import jieba
            self.jieba = jieba
            return True
        except ImportError:
            print("警告: jieba库未安装，将使用简单的分词方法")
            self.jieba = None
            return False

    def _load_stopwords(self) -> Set[str]:
        """加载停用词集合"""
        # 中文常用停用词
        chinese_stopwords = {
            '的', '了', '在', '是', '我', '有', '和', '就', '不', '人', '都', '一', '一个',
            '上', '也', '很', '到', '说', '要', '去', '你', '会', '着', '没有', '看', '好',
            '自己', '这', '那', '它', '他', '她', '们', '这个', '那个', '什么', '怎么',
            '为什么', '哪里', '哪个', '多少', '几个', '第一', '第二', '可以', '能够',
            '应该', '必须', '需要', '想要', '希望', '觉得', '认为', '知道', '了解',
            '明白', '清楚', '如果', '虽然', '但是', '然而', '因为', '所以', '由于',
            '为了', '关于', '对于', '根据', '按照', '通过', '利用', '使用', '进行',
            '实现', '完成', '达到', '获得', '取得', '提高', '增加', '减少', '降低',
            '改善', '优化', '发展', '推动', '促进', '支持', '帮助', '协助', '配合',
            '参与', '加入', '包含', '包括', '涉及', '相关', '类似', '相同', '不同',
            '各种', '多个', '各个', '每个', '所有', '全部', '整个', '部分', '一些',
            '某些', '任何', '各种', '以及', '或者', '而且', '并且', '同时', '另外',
            '此外', '总之', '总的来说', '基本上', '一般来说', '通常', '经常', '总是',
            '从未', '已经', '正在', '将要', '可能', '也许', '大概', '左右', '前后',
            '上下', '之间', '之内', '之外', '以上', '以下', '之前', '之后', '现在',
            '目前', '当前', '最近', '过去', '将来', '未来', '今天', '昨天', '明天'
        }

        # 英文停用词
        english_stopwords = {
            'a', 'an', 'and', 'are', 'as', 'at', 'be', 'by', 'for', 'from', 'has', 'he',
            'in', 'is', 'it', 'its', 'of', 'on', 'that', 'the', 'to', 'was', 'were',
            'will', 'with', 'the', 'this', 'that', 'these', 'those', 'i', 'you', 'he',
            'she', 'it', 'we', 'they', 'me', 'him', 'her', 'us', 'them', 'my', 'your',
            'his', 'her', 'its', 'our', 'their', 'mine', 'yours', 'hers', 'ours', 'theirs',
            'what', 'where', 'when', 'why', 'how', 'who', 'which', 'whom', 'whose',
            'can', 'could', 'may', 'might', 'must', 'shall', 'should', 'will', 'would',
            'do', 'does', 'did', 'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being',
            'have', 'has', 'had', 'having', 'but', 'or', 'if', 'because', 'although',
            'though', 'while', 'whereas', 'unless', 'until', 'since', 'before', 'after'
        }

        return chinese_stopwords.union(english_stopwords)

    def extract_keywords(
        self,
        text: str,
        method: str = "frequency",
        max_keywords: Optional[int] = None,
        include_scores: bool = False
    ) -> List[str] | List[Tuple[str, float]]:
        """
        提取关键词

        Args:
            text: 输入文本
            method: 提取方法 ("frequency", "position", "length", "hybrid")
            max_keywords: 最大关键词数量
            include_scores: 是否包含分数

        Returns:
            关键词列表，或包含分数的关键词元组列表
        """
        if not text:
            return [] if include_scores else []

        max_keywords = max_keywords or self.max_keywords

        # 分词
        words = self._tokenize(text)

        # 过滤
        filtered_words = self._filter_words(words)

        if not filtered_words:
            return [] if include_scores else []

        # 根据方法提取关键词
        if method == "frequency":
            keywords_with_scores = self._extract_by_frequency(filtered_words)
        elif method == "position":
            keywords_with_scores = self._extract_by_position(filtered_words)
        elif method == "length":
            keywords_with_scores = self._extract_by_length(filtered_words)
        elif method == "hybrid":
            keywords_with_scores = self._extract_hybrid(filtered_words)
        else:
            raise ValueError(f"不支持的提取方法: {method}")

        # 限制数量
        keywords_with_scores = keywords_with_scores[:max_keywords]

        if include_scores:
            return keywords_with_scores
        else:
            return [word for word, _ in keywords_with_scores]

    def _tokenize(self, text: str) -> List[str]:
        """分词"""
        if self.use_jieba and self._contains_chinese(text):
            # 使用jieba分词处理中文
            words = list(self.jieba.cut(text))
        else:
            # 简单分词：按空格和标点分割
            words = re.split(r'[\s\u3000\uff0c\u3001\uff1b\uff1f\uff1a\uff08\uff09\u3002\uff01\uff1f]+', text)

        # 过滤空字符串
        words = [word.strip() for word in words if word.strip()]
        return words

    def _contains_chinese(self, text: str) -> bool:
        """检查文本是否包含中文"""
        return bool(re.search(r'[\u4e00-\u9fff]', text))

    def _filter_words(self, words: List[str]) -> List[str]:
        """过滤词汇"""
        filtered = []
        for word in words:
            word = word.strip()

            # 长度检查
            if len(word) < self.min_word_length:
                continue

            # 停用词检查
            if word.lower() in self.stopwords:
                continue

            # 纯数字检查
            if word.isdigit():
                continue

            # 纯标点检查
            if not re.search(r'[a-zA-Z\u4e00-\u9fff]', word):
                continue

            filtered.append(word)

        return filtered

    def _extract_by_frequency(self, words: List[str]) -> List[Tuple[str, float]]:
        """基于词频提取关键词"""
        word_freq = Counter(words)
        total_words = len(words)

        # 计算归一化频率
        keywords_with_scores = [
            (word, freq / total_words) for word, freq in word_freq.items()
        ]

        # 按频率排序
        keywords_with_scores.sort(key=lambda x: x[1], reverse=True)

        return keywords_with_scores

    def _extract_by_position(self, words: List[str]) -> List[Tuple[str, float]]:
        """基于位置提取关键词（前面的词更重要）"""
        word_positions = {}
        for i, word in enumerate(words):
            if word not in word_positions:
                word_positions[word] = []
            word_positions[word].append(i)

        # 计算位置分数（前面的词分数更高）
        keywords_with_scores = []
        total_words = len(words)

        for word, positions in word_positions.items():
            # 平均位置（越小越好）
            avg_position = sum(positions) / len(positions)
            # 位置分数：1 - (平均位置/总词数)
            score = 1 - (avg_position / total_words)
            keywords_with_scores.append((word, score))

        # 按分数排序
        keywords_with_scores.sort(key=lambda x: x[1], reverse=True)

        return keywords_with_scores

    def _extract_by_length(self, words: List[str]) -> List[Tuple[str, float]]:
        """基于词长提取关键词（长的词可能更重要）"""
        max_length = max(len(word) for word in words) if words else 1

        keywords_with_scores = [
            (word, len(word) / max_length) for word in set(words)
        ]

        # 按长度排序
        keywords_with_scores.sort(key=lambda x: x[1], reverse=True)

        return keywords_with_scores

    def _extract_hybrid(self, words: List[str]) -> List[Tuple[str, float]]:
        """混合方法提取关键词"""
        # 获取各种方法的分数
        freq_scores = dict(self._extract_by_frequency(words))
        pos_scores = dict(self._extract_by_position(words))
        length_scores = dict(self._extract_by_length(words))

        # 计算混合分数
        hybrid_scores = {}
        for word in set(words):
            # 加权平均：频率50% + 位置30% + 长度20%
            freq_score = freq_scores.get(word, 0)
            pos_score = pos_scores.get(word, 0)
            length_score = length_scores.get(word, 0)

            hybrid_score = (freq_score * 0.5 + pos_score * 0.3 + length_score * 0.2)
            hybrid_scores[word] = hybrid_score

        # 按分数排序
        keywords_with_scores = sorted(
            hybrid_scores.items(),
            key=lambda x: x[1],
            reverse=True
        )

        return keywords_with_scores

    def extract_phrases(
        self,
        text: str,
        min_phrase_length: int = 2,
        max_phrase_length: int = 5,
        max_phrases: int = 5
    ) -> List[str]:
        """
        提取词组短语

        Args:
            text: 输入文本
            min_phrase_length: 最小短语长度
            max_phrase_length: 最大短语长度
            max_phrases: 最大短语数量

        Returns:
            List[str]: 短语列表
        """
        words = self._tokenize(text)
        filtered_words = self._filter_words(words)

        if len(filtered_words) < min_phrase_length:
            return []

        # 生成所有可能的短语
        phrases = []
        for length in range(min_phrase_length, min(max_phrase_length, len(filtered_words)) + 1):
            for i in range(len(filtered_words) - length + 1):
                phrase = ' '.join(filtered_words[i:i + length])
                phrases.append(phrase)

        # 统计短语频率
        phrase_freq = Counter(phrases)

        # 按频率排序并去重
        sorted_phrases = phrase_freq.most_common(max_phrases)

        return [phrase for phrase, _ in sorted_phrases]

    def calculate_keyword_similarity(
        self,
        keywords1: List[str],
        keywords2: List[str]
    ) -> float:
        """
        计算两组关键词的相似度

        Args:
            keywords1: 第一组关键词
            keywords2: 第二组关键词

        Returns:
            float: 相似度得分 (0-1)
        """
        set1 = set(keywords1)
        set2 = set(keywords2)

        if not set1 and not set2:
            return 1.0

        if not set1 or not set2:
            return 0.0

        # Jaccard相似度
        intersection = set1.intersection(set2)
        union = set1.union(set2)

        return len(intersection) / len(union) if union else 0.0

    def get_keyword_stats(self, text: str) -> Dict[str, Any]:
        """
        获取关键词统计信息

        Args:
            text: 输入文本

        Returns:
            Dict[str, Any]: 统计信息
        """
        if not text:
            return {
                "total_words": 0,
                "unique_words": 0,
                "filtered_words": 0,
                "keyword_density": 0,
                "avg_word_length": 0
            }

        words = self._tokenize(text)
        filtered_words = self._filter_words(words)

        # 计算平均词长
        avg_length = sum(len(word) for word in filtered_words) / len(filtered_words) if filtered_words else 0

        # 计算关键词密度
        keyword_density = len(filtered_words) / len(words) if words else 0

        return {
            "total_words": len(words),
            "unique_words": len(set(words)),
            "filtered_words": len(filtered_words),
            "stopwords_count": len(words) - len(filtered_words),
            "keyword_density": round(keyword_density, 3),
            "avg_word_length": round(avg_length, 1),
            "top_keywords": self.extract_keywords(text, max_keywords=5, include_scores=True)
        }

    def __str__(self) -> str:
        """返回提取器信息的字符串表示"""
        jieba_status = "已启用" if self.use_jieba else "未启用"
        return f"""
=== 关键词提取器 ===
支持方法: frequency, position, length, hybrid
分词库: jieba {jieba_status}
停用词数量: {len(self.stopwords)}
最小词长: {self.min_word_length}
最大关键词数: {self.max_keywords}
        """.strip()