"""
迭代式关键词扩展搜索器
实现多轮搜索、关键词扩展、话题提取等功能
"""
import logging
import time
import string
from typing import List, Dict, Set, Tuple, Optional
from concurrent.futures import ProcessPoolExecutor, as_completed
import multiprocessing as mp
from dataclasses import dataclass
from .database import DatabaseManager
from .data_sources import DataSourceManager
from .data_cleaner import KeywordCleaner
import jieba
import jieba.analyse


@dataclass
class SearchConfig:
    """Tunable settings for the iterative keyword search loop."""
    max_rounds: int = 5  # maximum number of search rounds
    query_batch_size: int = 100  # batch size for database queries
    loop_batch_size: int = 10  # batch size for the per-round processing loop
    max_processes: int = 4  # maximum number of worker processes
    delay_between_rounds: float = 2.0  # delay (seconds) between rounds
    min_keyword_length: int = 2  # minimum accepted keyword length
    max_keyword_length: int = 50  # maximum accepted keyword length


class IterativeKeywordSearcher:
    """Iterative keyword searcher: multi-round search, keyword expansion and topic extraction."""
    
    def __init__(self, config: Dict):
        """
        Initialize the searcher.
        
        Args:
            config: configuration dictionary; the optional 'iterative_search'
                sub-dict is unpacked into SearchConfig (unknown keys there
                would raise TypeError).
        """
        self.config = config
        self.logger = logging.getLogger(__name__)
        
        # Search settings; missing keys fall back to SearchConfig defaults.
        search_config = config.get('iterative_search', {})
        self.search_config = SearchConfig(**search_config)
        
        # Collaborating components (project-local modules).
        self.db_manager = DatabaseManager(config)
        self.source_manager = DataSourceManager(config)
        self.cleaner = KeywordCleaner(config)
        
        # Characters appended to a keyword during expansion: a-z and 0-9.
        self.expansion_chars = list(string.ascii_lowercase) + list(string.digits)
        
        self.logger.info("迭代式关键词搜索器初始化完成")
    
    def expand_single_keyword(self, keyword: str) -> List[str]:
        """
        Expand a single keyword by appending each expansion character.

        Args:
            keyword: the original keyword

        Returns:
            A duplicate-free list in deterministic order: the original keyword
            first, followed by keyword+char for each expansion character.
        """
        # The original keyword and every keyword+char are pairwise distinct by
        # construction, but expansion_chars itself could contain duplicates.
        # The previous `list(set(...))` dedup shuffled the result order between
        # runs; dict.fromkeys dedups while preserving insertion order.
        candidates = [keyword] + [f"{keyword}{char}" for char in self.expansion_chars]
        return list(dict.fromkeys(candidates))
    
    def expand_seed_keywords(self, seed_keywords: List[str]) -> List[str]:
        """
        扩展种子关键词（保留此方法用于兼容性）
        
        Args:
            seed_keywords: 种子关键词列表
        
        Returns:
            扩展后的关键词列表
        """
        expanded_keywords = []
        
        for seed_keyword in seed_keywords:
            expanded_keywords.extend(self.expand_single_keyword(seed_keyword))
        
        # 去重
        expanded_keywords = list(set(expanded_keywords))
        
        self.logger.info(f"种子关键词 {len(seed_keywords)} 个，扩展后 {len(expanded_keywords)} 个")
        return expanded_keywords
    
    def search_keywords_single_process(self, keywords: List[str], search_round: int) -> Tuple[List[Tuple[str, str, str]], List[Tuple[str, str, str]]]:
        """
        单进程搜索关键词
        
        Args:
            keywords: 关键词列表
            search_round: 搜索轮数
        
        Returns:
            (话题列表, 关键词列表) - 每个元素包含(内容, 源关键词, 搜索引擎来源)
        """
        topics = []
        new_keywords = []
        
        for keyword in keywords:
            try:
                # 从所有数据源搜索，获取带来源信息的结果
                search_results = self.source_manager.collect_from_all_sources(keyword, with_source_info=True)
                
                if search_results:
                    # 将搜索结果作为话题保存
                    for result, source_name in search_results:
                        if self._is_valid_topic(result):
                            topics.append((result, keyword, source_name))
                    
                    # 从话题中提取关键词
                    for topic_text, source_keyword, search_source in topics:
                        extracted_keywords = self._extract_keywords_from_topic(topic_text, source_keyword)
                        for kw in extracted_keywords:
                            # 这里不需要再验证，因为_extract_keywords_from_topic已经过滤过了
                            new_keywords.append((kw, topic_text, search_source))
                
                # 添加延迟，避免请求过于频繁
                time.sleep(0.5)
                
            except Exception as e:
                self.logger.error(f"搜索关键词失败: {keyword}, 错误: {e}")
                continue
        
        return topics, new_keywords
    
    def _is_valid_topic(self, topic: str) -> bool:
        """检查话题是否有效"""
        if not topic or not isinstance(topic, str):
            return False
        
        topic = topic.strip()
        if len(topic) < self.search_config.min_keyword_length:
            return False
        
        if len(topic) > self.search_config.max_keyword_length * 3:  # 话题可以比关键词长
            return False
        
        return True
    
    def _is_valid_keyword(self, keyword: str) -> bool:
        """检查关键词是否有效"""
        if not keyword or not isinstance(keyword, str):
            return False
        
        keyword = keyword.strip()
        if len(keyword) < self.search_config.min_keyword_length:
            return False
        
        if len(keyword) > self.search_config.max_keyword_length:
            return False
        
        # 过滤掉纯数字或纯符号
        if keyword.isdigit() or not any(c.isalnum() for c in keyword):
            return False
        
        return True
    
    def _extract_keywords_from_topic(self, topic: str, source_keyword: str) -> List[str]:
        """
        Extract valuable long-tail keywords (each containing source_keyword)
        from a topic, combining four extraction strategies.

        Args:
            topic: topic text
            source_keyword: seed keyword used to guarantee relevance

        Returns:
            At most 8 long-tail keywords, most valuable first; [] on error.
        """
        try:
            pos_filter = ('n', 'nr', 'ns', 'nt', 'nz', 'v', 'vn', 'a', 'an')
            candidates = [
                # Strategy 1: TextRank phrase-level keywords.
                *jieba.analyse.textrank(topic, topK=15, withWeight=False, allowPOS=pos_filter),
                # Strategy 2: TF-IDF single keywords.
                *jieba.analyse.extract_tags(topic, topK=20, withWeight=False, allowPOS=pos_filter),
                # Strategy 3: combinations built around the source keyword.
                *self._extract_combination_keywords(topic, source_keyword),
                # Strategy 4: phrases cut on punctuation/semantic boundaries.
                *self._extract_phrase_segments(topic, source_keyword),
            ]
            
            # Order-preserving dedup, then keep only valuable long-tail words.
            valuable = [
                kw for kw in dict.fromkeys(candidates)
                if self._is_valuable_longtail_keyword(kw, source_keyword, topic)
            ]
            
            # Most valuable first; the sort is stable, so candidate order is
            # preserved among equal scores.
            valuable.sort(key=lambda kw: self._calculate_keyword_value(kw, source_keyword), reverse=True)
            
            # Cap at the 8 best candidates.
            result = valuable[:8]
            
            self.logger.debug(f"从话题提取关键词: 原关键词='{source_keyword}', 话题='{topic[:50]}...', 提取到{len(result)}个有价值关键词")
            
            return result
            
        except Exception as e:
            self.logger.error(f"从话题提取关键词失败: {topic}, 错误: {e}")
            return []
    
    def _extract_combination_keywords(self, topic: str, source_keyword: str) -> List[str]:
        """
        在原关键词周围寻找相关词汇组合，构建完整的长尾词
        
        Args:
            topic: 话题文本
            source_keyword: 源关键词
        
        Returns:
            组合关键词列表
        """
        try:
            combination_keywords = []
            
            # 在原关键词前后寻找相关词汇
            if source_keyword in topic:
                start_pos = topic.find(source_keyword)
                end_pos = start_pos + len(source_keyword)
                
                # 向前扩展（最多3个字符）
                for i in range(1, 4):
                    if start_pos - i >= 0:
                        prefix = topic[start_pos - i:start_pos]
                        if self._is_valid_word_prefix(prefix):
                            combination_keywords.append(prefix + source_keyword)
                
                # 向后扩展（最多12个字符）
                for i in range(2, 12):
                    if end_pos + i <= len(topic):
                        suffix = topic[end_pos:end_pos + i]
                        if self._is_valid_word_suffix(suffix):
                            combination_keywords.append(source_keyword + suffix)
                
                # 前后同时扩展
                for pre_len in range(1, 3):
                    for suf_len in range(1, 4):
                        if (start_pos - pre_len >= 0 and 
                            end_pos + suf_len <= len(topic)):
                            prefix = topic[start_pos - pre_len:start_pos]
                            suffix = topic[end_pos:end_pos + suf_len]
                            if (self._is_valid_word_prefix(prefix) and 
                                self._is_valid_word_suffix(suffix)):
                                combination_keywords.append(prefix + source_keyword + suffix)
            
            return combination_keywords
            
        except Exception as e:
            self.logger.error(f"组合关键词提取失败: {e}")
            return []
    
    def _extract_phrase_segments(self, topic: str, source_keyword: str) -> List[str]:
        """
        基于标点符号和语义分割提取完整短语
        
        Args:
            topic: 话题文本
            source_keyword: 源关键词
        
        Returns:
            短语列表
        """
        try:
            phrase_segments = []
            
            # 按标点符号分割
            import re
            segments = re.split(r'[，。！？；：、]', topic)
            
            for segment in segments:
                segment = segment.strip()
                if not segment:
                    continue
                
                # 如果段落包含原关键词，尝试提取完整短语
                if source_keyword in segment:
                    # 提取包含原关键词的完整短语
                    start_pos = segment.find(source_keyword)
                    end_pos = start_pos + len(source_keyword)
                    
                    # 向前扩展寻找短语开头
                    phrase_start = start_pos
                    for i in range(start_pos - 1, -1, -1):
                        if segment[i] in '的得地':
                            phrase_start = i + 1
                            break
                        elif len(segment[i:start_pos]) > 8:  # 限制短语长度
                            break
                    
                    # 向后扩展寻找短语结尾
                    phrase_end = end_pos
                    for i in range(end_pos, len(segment)):
                        if segment[i] in '的得地':
                            phrase_end = i
                            break
                        elif len(segment[start_pos:i+1]) > 12:  # 限制短语长度
                            break
                    
                    # 提取完整短语
                    if phrase_end > phrase_start:
                        phrase = segment[phrase_start:phrase_end]
                        if len(phrase) > len(source_keyword) and self._is_valid_phrase(phrase):
                            phrase_segments.append(phrase)
            
            return phrase_segments
            
        except Exception as e:
            self.logger.error(f"短语分割提取失败: {e}")
            return []
    
    def _is_valid_word_prefix(self, prefix: str) -> bool:
        """A usable prefix is either a known connective particle or a single
        CJK character (likely an adjective/noun modifier)."""
        if not prefix:
            return False
        
        # Connective particles accepted as-is.
        connectives = {'的', '得', '地', '和', '与', '及', '或', '但', '而', '且'}
        if prefix in connectives:
            return True
        
        # Any single character in the CJK Unified Ideographs range.
        return len(prefix) == 1 and '\u4e00' <= prefix <= '\u9fff'
    
    def _is_valid_word_suffix(self, suffix: str) -> bool:
        """A usable suffix is a particle/locative word, ends with a meaningful
        commercial noun, or is a single CJK character."""
        if not suffix:
            return False
        
        # Particles and locatives accepted as-is.
        particles = {'的', '得', '地', '了', '着', '过', '中', '内', '外', '上', '下'}
        if suffix in particles:
            return True
        
        # str.endswith accepts a tuple — one call covers every meaningful ending.
        if suffix.endswith(('方案', '策略', '技巧', '方法', '工具', '软件', '平台', '服务', '产品', '品牌')):
            return True
        
        # Any single character in the CJK Unified Ideographs range.
        return len(suffix) == 1 and '\u4e00' <= suffix <= '\u9fff'
    
    def _is_valid_phrase(self, phrase: str) -> bool:
        """A valid phrase is 2-20 characters long and contains no control
        characters or full-width spaces."""
        if not phrase or not (2 <= len(phrase) <= 20):
            return False
        
        # '　' is the full-width (ideographic) space.
        return not any(bad in phrase for bad in ('\n', '\t', '\r', '　'))
    
    def _is_valuable_longtail_keyword(self, keyword: str, source_keyword: str, topic: str) -> bool:
        """
        判断是否为有价值的长尾关键词
        
        Args:
            keyword: 候选关键词
            source_keyword: 源关键词
            topic: 话题文本
        
        Returns:
            是否为有价值的长尾关键词
        """
        # 基本验证
        if not self._is_valid_keyword(keyword):
            return False
        
        # 必须包含原关键词（确保相关性）
        if source_keyword not in keyword:
            return False
        
        # 避免完全相同的关键词
        if keyword == source_keyword:
            return False
        
        # 检查是否为长尾词（比原关键词更长、更具体）
        if len(keyword) <= len(source_keyword):
            return False
        
        # 检查是否在话题中出现（确保上下文相关性）
        if keyword not in topic:
            return False
        
        # 过滤掉过于泛泛的关键词
        generic_patterns = [
            '什么', '怎么', '如何', '为什么', '哪里', '哪个', '哪些',
            '方法', '技巧', '攻略', '教程', '指南', '手册', '大全',
            '推荐', '介绍', '说明', '解释', '分析', '总结', '概述'
        ]
        
        for pattern in generic_patterns:
            if pattern in keyword:
                return False
        
        return True
    
    def _calculate_keyword_value(self, keyword: str, source_keyword: str) -> float:
        """
        Score a candidate keyword; higher means more valuable.

        Scoring: +0.5 per character beyond the source keyword (capped at 10),
        +2.0 per specific commercial term, +1.5 per industry term, -3.0 per
        generic question word; the final score is floored at 0.

        Args:
            keyword: candidate keyword
            source_keyword: seed keyword

        Returns:
            Non-negative value score.
        """
        specific_terms = ('方案', '策略', '技巧', '方法', '工具', '软件', '平台', '服务', '产品', '品牌')
        industry_terms = ('营销', '推广', '运营', '设计', '开发', '测试', '部署', '维护', '优化', '分析')
        generic_terms = ('什么', '怎么', '如何', '为什么', '哪里', '哪个', '哪些')
        
        # Length bonus: longer long-tail keywords are usually more specific.
        score = min(len(keyword) - len(source_keyword), 10) * 0.5
        score += 2.0 * sum(1 for term in specific_terms if term in keyword)
        score += 1.5 * sum(1 for term in industry_terms if term in keyword)
        score -= 3.0 * sum(1 for term in generic_terms if term in keyword)
        
        return max(score, 0.0)
    
    def save_search_results(self, topics: List[Tuple[str, str, str]], keywords: List[Tuple[str, str, str]], search_round: int):
        """
        保存搜索结果到数据库
        
        Args:
            topics: 话题列表 [(话题文本, 源关键词, 搜索引擎来源)]
            keywords: 关键词列表 [(关键词文本, 源话题, 搜索引擎来源)]
            search_round: 搜索轮数
        """
        # 保存话题
        topic_count = 0
        for topic_text, source_keyword, search_source in topics:
            if self.db_manager.add_topic(topic_text, source_keyword, search_round):
                topic_count += 1
        
        # 保存关键词
        keyword_count = 0
        for keyword_text, source_topic, search_source in keywords:
            if self.db_manager.add_keyword(keyword_text, source_topic, search_source, search_round):
                keyword_count += 1
        
        self.logger.info(f"第 {search_round} 轮搜索保存完成: 话题 {topic_count} 个，关键词 {keyword_count} 个")
    
    def run_iterative_search(self, seed_keywords: List[str]) -> Dict:
        """
        运行迭代式搜索
        
        Args:
            seed_keywords: 种子关键词列表
        
        Returns:
            搜索结果统计
        """
        self.logger.info(f"开始迭代式搜索，种子关键词: {len(seed_keywords)} 个")
        
        # 只保存原始种子关键词到数据库，不保存扩展后的
        for keyword in seed_keywords:
            self.db_manager.add_keyword(keyword, "seed", "seed", 0)
        
        current_round = 1
        total_topics = 0
        total_keywords = 0
        
        while current_round <= self.search_config.max_rounds:
            self.logger.info(f"开始第 {current_round} 轮搜索...")
            
            # 获取未处理的关键词
            unprocessed_keywords = self.db_manager.get_unprocessed_keywords(
                self.search_config.query_batch_size
            )
            
            if not unprocessed_keywords:
                self.logger.info("关键词库为空，搜索结束")
                break
            
            self.logger.info(f"第 {current_round} 轮处理 {len(unprocessed_keywords)} 个关键词")
            
            # 分批处理原始关键词，避免一次处理太多
            batch_results = []
            for i in range(0, len(unprocessed_keywords), self.search_config.loop_batch_size):
                batch = unprocessed_keywords[i:i + self.search_config.loop_batch_size]
                
                for keyword in batch:
                    # 扩展当前关键词
                    expanded_keywords = self.expand_single_keyword(keyword)
                    
                    # 使用扩展后的关键词进行搜索
                    topics, keywords = self.search_keywords_single_process(expanded_keywords, current_round)
                    batch_results.append((topics, keywords))
                    
                    # 标记原始关键词为已处理
                    self.db_manager.mark_keyword_processed(keyword)
            
            # 合并结果
            all_topics = []
            all_keywords = []
            for topics, keywords in batch_results:
                all_topics.extend(topics)
                all_keywords.extend(keywords)
            
            # 保存结果
            self.save_search_results(all_topics, all_keywords, current_round)
            
            # 统计
            round_topics = len(all_topics)
            round_keywords = len(all_keywords)
            total_topics += round_topics
            total_keywords += round_keywords
            
            self.logger.info(f"第 {current_round} 轮完成: 话题 {round_topics} 个，关键词 {round_keywords} 个")
            
            # 检查是否继续
            if round_keywords == 0:
                self.logger.info("本轮未发现新关键词，搜索结束")
                break
            
            current_round += 1
            
            # 轮次间延迟
            if current_round <= self.search_config.max_rounds:
                time.sleep(self.search_config.delay_between_rounds)
        
        # 生成最终统计
        final_stats = {
            'total_rounds': current_round - 1,
            'total_topics': total_topics,
            'total_keywords': total_keywords,
            'final_topic_count': self.db_manager.get_topic_count(),
            'final_keyword_count': self.db_manager.get_keyword_count(),
            'seed_keywords': len(seed_keywords),
            'expanded_keywords': len(seed_keywords) * (len(self.expansion_chars) + 1)  # 理论扩展数量
        }
        
        self.logger.info("迭代式搜索完成")
        self.logger.info(f"最终统计: {final_stats}")
        
        return final_stats
    
    def get_search_statistics(self) -> Dict:
        """Return current topic/keyword counts from the database.

        Each count is queried exactly once (the previous version issued
        get_keyword_count three times, two of them redundant DB hits); the
        unprocessed count is derived by subtraction.
        """
        total_keywords = self.db_manager.get_keyword_count()
        processed_keywords = self.db_manager.get_keyword_count(processed_only=True)
        return {
            'topic_count': self.db_manager.get_topic_count(),
            'keyword_count': total_keywords,
            'processed_keyword_count': processed_keywords,
            'unprocessed_keyword_count': total_keywords - processed_keywords
        }
    
    def cleanup(self):
        """清理资源"""
        try:
            self.db_manager.close()
            self.logger.info("搜索器资源清理完成")
        except Exception as e:
            self.logger.error(f"清理资源失败: {e}")


def create_iterative_searcher(config: Dict) -> IterativeKeywordSearcher:
    """
    Factory: create an IterativeKeywordSearcher instance.
    
    Args:
        config: configuration dictionary, passed straight through to the
            searcher's constructor
    
    Returns:
        A freshly constructed IterativeKeywordSearcher
    """
    return IterativeKeywordSearcher(config)
