# -*- coding: utf-8 -*-
"""
多步推理检索器
基于多步推理的检索方法
"""

import hashlib
import json
import os
import re
from collections import defaultdict
from typing import Dict, List, Any, Tuple, Optional

from loguru import logger

from .base_retriever import BaseRetriever, RetrievalResult
from ..ollama_client import ollama_client


class ReasoningStep:
    """One step in a multi-step reasoning chain.

    step_type is a free-form tag; the retriever uses
    decompose / search / analyze / synthesize / reflect.
    """

    # Fields serialized by to_dict, in output order.
    _FIELDS = ('step_id', 'step_type', 'query', 'result', 'confidence', 'metadata')

    def __init__(self, step_id: str, step_type: str, query: str, result: Any = None, confidence: float = 0.0):
        self.step_id = step_id
        self.step_type = step_type  # decompose, search, analyze, synthesize
        self.query = query
        self.result = result
        self.confidence = confidence
        self.metadata = {}  # free-form extras; starts empty

    def to_dict(self) -> Dict[str, Any]:
        """Serialize this step to a plain dictionary."""
        return {name: getattr(self, name) for name in self._FIELDS}


class MultiStepRetriever(BaseRetriever):
    """多步推理检索器"""
    
    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """
        Initialize the multi-step reasoning retriever.

        Args:
            config: Optional configuration dict; the recognized keys are
                read below, each with a default when missing.
        """
        super().__init__("MultiStepRetriever", config)
        
        # Configuration parameters
        # NOTE(review): max_reasoning_steps, min_confidence_threshold and
        # search_strategy are stored but never read elsewhere in this class.
        self.max_reasoning_steps = self.config.get("max_reasoning_steps", 5)
        self.min_confidence_threshold = self.config.get("min_confidence_threshold", 0.6)
        self.decomposition_method = self.config.get("decomposition_method", "llm")  # llm, rule_based
        self.search_strategy = self.config.get("search_strategy", "breadth_first")  # breadth_first, depth_first
        self.enable_self_reflection = self.config.get("enable_self_reflection", True)
        self.cache_path = self.config.get("cache_path", "data/multi_step_cache.json")
        
        # Data stores: raw corpus, doc id -> item, keyword -> doc ids,
        # concept -> doc ids (populated by initialize/_build_indices)
        self.data = []
        self.doc_index = {}
        self.keyword_index = defaultdict(list)
        self.concept_index = defaultdict(list)
        
        # Cache of question decompositions, persisted via _load_cache/_save_cache
        self.reasoning_cache = {}
        
        # Prompt templates for each reasoning phase (filled via str.format)
        self.reasoning_templates = {
            'decompose': """
请将以下复杂问题分解为更简单的子问题：

原问题: {question}

请返回3-5个子问题，每行一个，格式如下：
1. 子问题1
2. 子问题2
3. 子问题3
""",
            'analyze': """
请分析以下信息并提取关键要点：

问题: {question}
信息: {information}

请提取关键要点和相关信息。
""",
            'synthesize': """
请基于以下信息综合回答问题：

问题: {question}

相关信息:
{information}

请提供完整、准确的答案。
""",
            'reflect': """
请评估以下答案的质量和完整性：

问题: {question}
答案: {answer}

评估标准：
1. 准确性 (0-10)
2. 完整性 (0-10)
3. 相关性 (0-10)

请返回评分和改进建议。
"""
        }
        
        logger.info("🧠 多步推理检索器初始化完成")
    
    def initialize(self, data: List[Dict[str, Any]]) -> bool:
        """
        Prepare the retriever: load the persisted reasoning cache, store
        the corpus, and build the keyword/concept lookup indices.

        Args:
            data: Corpus items ('question'/'answer' fields are indexed).

        Returns:
            bool: True on success, False when any step raises.
        """
        try:
            logger.info(f"📊 开始初始化多步推理检索器，数据量: {len(data)}")
            
            self._load_cache()      # previously persisted decompositions
            self.data = data        # keep the raw corpus
            self._build_indices()   # keyword/concept/doc lookup tables
            
            self.is_initialized = True
            logger.info("✅ 多步推理检索器初始化成功")
            return True
            
        except Exception as e:
            logger.error(f"❌ 多步推理检索器初始化异常: {e}")
            return False
    
    def _build_indices(self) -> None:
        """
        Build the lookup indices over ``self.data``.

        Populates:
            - doc_index: doc id (str) -> raw item
            - keyword_index: lowercased keyword -> list of doc ids
            - concept_index: lowercased concept -> list of doc ids
        """
        logger.info("🔧 构建多步推理索引...")
        
        for i, item in enumerate(self.data):
            doc_id = str(item.get('id', i))
            self.doc_index[doc_id] = item
            
            # Index question + answer text; tolerate items missing either
            # field (previously item['question'] raised KeyError while
            # 'answer' was already read with .get).
            text = f"{item.get('question', '')} {item.get('answer', '')}"
            keywords = self._extract_keywords(text)
            
            for keyword in keywords:
                self.keyword_index[keyword.lower()].append(doc_id)
            
            # Concept index (fixed domain-vocabulary matches)
            concepts = self._extract_concepts(text)
            for concept in concepts:
                self.concept_index[concept.lower()].append(doc_id)
        
        logger.info(f"✅ 索引构建完成，关键词: {len(self.keyword_index)}, 概念: {len(self.concept_index)}")
    
    def retrieve(self, question: str, top_k: int = 5) -> RetrievalResult:
        """
        Run the full multi-step reasoning pipeline for a question.

        Pipeline: decompose the question -> per-sub-question search and
        analysis -> synthesize a final answer -> optional self-reflection
        with answer improvement.

        Args:
            question: The query question.
            top_k: Max number of evidence documents attached to the result.

        Returns:
            RetrievalResult: Answer plus reasoning-step metadata; an empty
            result (confidence 0.0) when uninitialized or on any error.
        """
        if not self.is_initialized:
            logger.error("❌ 多步推理检索器未初始化")
            return RetrievalResult(question, "", 0.0, 0.0)
        
        try:
            # Inner function so _measure_time can time the whole pipeline
            def _retrieve_internal():
                # 1. Analyze and decompose the question into sub-questions
                reasoning_steps = []
                sub_questions = self._decompose_question(question)
                
                decompose_step = ReasoningStep(
                    step_id="step_1",
                    step_type="decompose",
                    query=question,
                    result=sub_questions,
                    confidence=0.8
                )
                reasoning_steps.append(decompose_step)
                
                # 2. Search + analyze for every sub-question
                all_evidence = []
                
                for i, sub_question in enumerate(sub_questions):
                    # Search step: gather candidate evidence documents
                    search_results = self._search_for_subquestion(sub_question)
                    
                    search_step = ReasoningStep(
                        step_id=f"step_{i+2}_search",
                        step_type="search",
                        query=sub_question,
                        result=search_results,
                        confidence=self._calculate_search_confidence(search_results)
                    )
                    reasoning_steps.append(search_step)
                    
                    # Analysis step: LLM summary of the search results
                    analysis = self._analyze_search_results(sub_question, search_results)
                    
                    analysis_step = ReasoningStep(
                        step_id=f"step_{i+2}_analyze",
                        step_type="analyze",
                        query=sub_question,
                        result=analysis,
                        confidence=0.7
                    )
                    reasoning_steps.append(analysis_step)
                    
                    all_evidence.extend(search_results)
                
                # 3. Synthesize the final answer from all collected evidence
                final_answer, confidence = self._synthesize_answer(question, all_evidence, reasoning_steps)
                
                synthesize_step = ReasoningStep(
                    step_id="final_synthesis",
                    step_type="synthesize",
                    query=question,
                    result=final_answer,
                    confidence=confidence
                )
                reasoning_steps.append(synthesize_step)
                
                # 4. Optional self-reflection on the synthesized answer
                if self.enable_self_reflection:
                    reflection = self._self_reflect(question, final_answer)
                    
                    reflect_step = ReasoningStep(
                        step_id="self_reflection",
                        step_type="reflect",
                        query=question,
                        result=reflection,
                        confidence=0.6
                    )
                    reasoning_steps.append(reflect_step)
                    
                    # Rewrite the answer when reflection flags it as weak
                    if reflection.get('needs_improvement', False):
                        final_answer, confidence = self._improve_answer(question, final_answer, reflection)
                
                return final_answer, confidence, all_evidence, reasoning_steps
            
            result_data, response_time = self._measure_time(_retrieve_internal)
            final_answer, confidence, all_evidence, reasoning_steps = result_data
            
            # Count non-empty answers as successes
            if final_answer:
                self.success_count += 1
            
            # NOTE(review): all_evidence is not globally re-ranked before
            # truncation to top_k — ordering follows sub-question order.
            return RetrievalResult(
                question=question,
                answer=final_answer,
                confidence=confidence,
                response_time=response_time,
                retrieved_docs=all_evidence[:top_k],
                metadata={
                    'method': 'multi_step_reasoning',
                    'reasoning_steps': [step.to_dict() for step in reasoning_steps],
                    'num_steps': len(reasoning_steps),
                    'num_evidence': len(all_evidence)
                }
            )
            
        except Exception as e:
            logger.error(f"❌ 多步推理检索失败: {e}")
            return RetrievalResult(question, "", 0.0, 0.0)
    
    def _decompose_question(self, question: str) -> List[str]:
        """
        Decompose a question into sub-questions, with caching.

        Args:
            question: The original question.

        Returns:
            List[str]: Sub-questions (at least [question]).
        """
        # Use a content-based digest for the cache key: the builtin hash()
        # is salted per process (PYTHONHASHSEED), so keys built from it can
        # never match entries persisted to disk by a previous run, making
        # the on-disk cache useless.
        digest = hashlib.md5(question.encode('utf-8')).hexdigest()
        cache_key = f"decompose_{digest}"
        if cache_key in self.reasoning_cache:
            return self.reasoning_cache[cache_key]
        
        # Choose the decomposition strategy from configuration
        if self.decomposition_method == "llm":
            sub_questions = self._llm_decompose(question)
        else:
            sub_questions = self._rule_based_decompose(question)
        
        # Cache for reuse (persisted by _save_cache)
        self.reasoning_cache[cache_key] = sub_questions
        
        logger.info(f"🔍 问题分解: {question} -> {len(sub_questions)} 个子问题")
        return sub_questions
    
    def _llm_decompose(self, question: str) -> List[str]:
        """
        Ask the LLM to split a question into numbered sub-questions.

        Args:
            question: The original question.

        Returns:
            List[str]: At most five parsed sub-questions; falls back to
            [question] when parsing finds nothing or the LLM call fails.
        """
        try:
            prompt = self.reasoning_templates['decompose'].format(question=question)
            response = ollama_client.generate_text(prompt)
            
            # Keep only lines shaped like "1. ...", with the numbering stripped.
            numbered = re.compile(r'^\d+\.\s*')
            sub_questions = []
            for raw_line in response.split('\n'):
                stripped = raw_line.strip()
                if numbered.match(stripped):
                    candidate = numbered.sub('', stripped).strip()
                    if candidate:
                        sub_questions.append(candidate)
            
            # Fall back to the original question if nothing was parsed;
            # cap at five sub-questions either way.
            return (sub_questions or [question])[:5]
            
        except Exception as e:
            logger.warning(f"⚠️ LLM问题分解失败: {e}")
            return [question]
    
    def _rule_based_decompose(self, question: str) -> List[str]:
        """
        基于规则的问题分解
        
        Args:
            question: 原始问题
        
        Returns:
            List[str]: 子问题列表
        """
        sub_questions = []
        
        # 检测问题类型并分解
        if "什么是" in question or "定义" in question:
            # 定义类问题
            concept = self._extract_main_concept(question)
            if concept:
                sub_questions = [
                    f"{concept}的定义是什么？",
                    f"{concept}有什么特点？",
                    f"{concept}的应用场景有哪些？"
                ]
        
        elif "如何" in question or "怎么" in question:
            # 方法类问题
            action = self._extract_main_action(question)
            if action:
                sub_questions = [
                    f"{action}的基本步骤是什么？",
                    f"{action}需要什么条件？",
                    f"{action}有什么注意事项？"
                ]
        
        elif "为什么" in question or "原因" in question:
            # 原因类问题
            phenomenon = self._extract_main_phenomenon(question)
            if phenomenon:
                sub_questions = [
                    f"{phenomenon}的直接原因是什么？",
                    f"{phenomenon}的根本原因是什么？",
                    f"{phenomenon}的影响因素有哪些？"
                ]
        
        elif "比较" in question or "区别" in question:
            # 比较类问题
            entities = self._extract_comparison_entities(question)
            if len(entities) >= 2:
                sub_questions = [
                    f"{entities[0]}的特点是什么？",
                    f"{entities[1]}的特点是什么？",
                    f"{entities[0]}和{entities[1]}的主要区别是什么？"
                ]
        
        # 如果没有分解出子问题，使用原问题
        if not sub_questions:
            sub_questions = [question]
        
        return sub_questions
    
    def _extract_main_concept(self, question: str) -> str:
        """提取主要概念"""
        # 简单的概念提取
        patterns = [r'什么是(.+?)\?', r'(.+?)的定义', r'(.+?)是什么']
        for pattern in patterns:
            match = re.search(pattern, question)
            if match:
                return match.group(1).strip()
        return ""
    
    def _extract_main_action(self, question: str) -> str:
        """提取主要动作"""
        patterns = [r'如何(.+?)\?', r'怎么(.+?)\?', r'(.+?)的方法']
        for pattern in patterns:
            match = re.search(pattern, question)
            if match:
                return match.group(1).strip()
        return ""
    
    def _extract_main_phenomenon(self, question: str) -> str:
        """提取主要现象"""
        patterns = [r'为什么(.+?)\?', r'(.+?)的原因', r'(.+?)为什么']
        for pattern in patterns:
            match = re.search(pattern, question)
            if match:
                return match.group(1).strip()
        return ""
    
    def _extract_comparison_entities(self, question: str) -> List[str]:
        """提取比较实体"""
        # 查找"和"、"与"等连接词
        entities = []
        patterns = [r'(.+?)和(.+?)的', r'(.+?)与(.+?)的', r'比较(.+?)和(.+?)']
        
        for pattern in patterns:
            match = re.search(pattern, question)
            if match:
                entities = [match.group(1).strip(), match.group(2).strip()]
                break
        
        return entities
    
    def _search_for_subquestion(self, sub_question: str) -> List[Dict[str, Any]]:
        """
        为子问题搜索相关信息
        
        Args:
            sub_question: 子问题
        
        Returns:
            List[Dict[str, Any]]: 搜索结果
        """
        results = []
        
        # 1. 关键词搜索
        keywords = self._extract_keywords(sub_question)
        keyword_results = self._keyword_search(keywords)
        results.extend(keyword_results)
        
        # 2. 概念搜索
        concepts = self._extract_concepts(sub_question)
        concept_results = self._concept_search(concepts)
        results.extend(concept_results)
        
        # 3. 语义搜索
        semantic_results = self._semantic_search(sub_question)
        results.extend(semantic_results)
        
        # 去重和评分
        unique_results = self._deduplicate_and_score(results, sub_question)
        
        return unique_results[:5]  # 返回前5个结果
    
    def _keyword_search(self, keywords: List[str]) -> List[Dict[str, Any]]:
        """
        关键词搜索
        
        Args:
            keywords: 关键词列表
        
        Returns:
            List[Dict[str, Any]]: 搜索结果
        """
        results = []
        doc_scores = defaultdict(float)
        
        for keyword in keywords:
            if keyword.lower() in self.keyword_index:
                doc_ids = self.keyword_index[keyword.lower()]
                for doc_id in doc_ids:
                    doc_scores[doc_id] += 1.0 / len(keywords)
        
        for doc_id, score in doc_scores.items():
            if doc_id in self.doc_index:
                doc = self.doc_index[doc_id].copy()
                doc['score'] = score
                doc['search_method'] = 'keyword'
                results.append(doc)
        
        return results
    
    def _concept_search(self, concepts: List[str]) -> List[Dict[str, Any]]:
        """
        概念搜索
        
        Args:
            concepts: 概念列表
        
        Returns:
            List[Dict[str, Any]]: 搜索结果
        """
        results = []
        doc_scores = defaultdict(float)
        
        for concept in concepts:
            if concept.lower() in self.concept_index:
                doc_ids = self.concept_index[concept.lower()]
                for doc_id in doc_ids:
                    doc_scores[doc_id] += 1.5 / len(concepts)  # 概念权重更高
        
        for doc_id, score in doc_scores.items():
            if doc_id in self.doc_index:
                doc = self.doc_index[doc_id].copy()
                doc['score'] = score
                doc['search_method'] = 'concept'
                results.append(doc)
        
        return results
    
    def _semantic_search(self, query: str) -> List[Dict[str, Any]]:
        """
        语义搜索
        
        Args:
            query: 查询文本
        
        Returns:
            List[Dict[str, Any]]: 搜索结果
        """
        results = []
        
        # 简单的语义匹配（基于词汇重叠）
        query_words = set(query.lower().split())
        
        for doc_id, doc in self.doc_index.items():
            doc_text = f"{doc['question']} {doc.get('answer', '')}"
            doc_words = set(doc_text.lower().split())
            
            # 计算词汇重叠度
            overlap = len(query_words.intersection(doc_words))
            total = len(query_words.union(doc_words))
            
            if total > 0:
                similarity = overlap / total
                if similarity > 0.1:  # 最小阈值
                    result_doc = doc.copy()
                    result_doc['score'] = similarity
                    result_doc['search_method'] = 'semantic'
                    results.append(result_doc)
        
        return results
    
    def _deduplicate_and_score(self, results: List[Dict[str, Any]], query: str) -> List[Dict[str, Any]]:
        """
        去重和重新评分
        
        Args:
            results: 搜索结果
            query: 查询文本
        
        Returns:
            List[Dict[str, Any]]: 去重后的结果
        """
        # 按文档ID去重，保留最高分
        doc_best_scores = {}
        for result in results:
            doc_id = result.get('id')
            if doc_id not in doc_best_scores or result['score'] > doc_best_scores[doc_id]['score']:
                doc_best_scores[doc_id] = result
        
        unique_results = list(doc_best_scores.values())
        
        # 重新评分
        for result in unique_results:
            # 综合多种因素的分数
            base_score = result['score']
            
            # 文本长度因子
            answer_length = len(result.get('answer', ''))
            length_factor = min(answer_length / 100, 1.0)  # 归一化
            
            # 查询匹配因子
            query_match = self._calculate_query_match(query, result)
            
            # 最终分数
            final_score = base_score * 0.6 + length_factor * 0.2 + query_match * 0.2
            result['final_score'] = final_score
        
        # 按最终分数排序
        unique_results.sort(key=lambda x: x.get('final_score', 0), reverse=True)
        
        return unique_results
    
    def _calculate_query_match(self, query: str, result: Dict[str, Any]) -> float:
        """
        计算查询匹配度
        
        Args:
            query: 查询文本
            result: 结果文档
        
        Returns:
            float: 匹配度分数
        """
        query_words = set(query.lower().split())
        result_text = f"{result['question']} {result.get('answer', '')}"
        result_words = set(result_text.lower().split())
        
        if not query_words or not result_words:
            return 0.0
        
        intersection = query_words.intersection(result_words)
        return len(intersection) / len(query_words)
    
    def _calculate_search_confidence(self, search_results: List[Dict[str, Any]]) -> float:
        """
        计算搜索置信度
        
        Args:
            search_results: 搜索结果
        
        Returns:
            float: 置信度分数
        """
        if not search_results:
            return 0.0
        
        # 基于结果数量和分数
        num_results = len(search_results)
        avg_score = sum(result.get('score', 0) for result in search_results) / num_results
        
        # 结果数量因子
        quantity_factor = min(num_results / 5, 1.0)
        
        # 质量因子
        quality_factor = avg_score
        
        return (quantity_factor + quality_factor) / 2
    
    def _analyze_search_results(self, sub_question: str, search_results: List[Dict[str, Any]]) -> str:
        """
        Summarize the top search results for a sub-question via the LLM.

        Args:
            sub_question: The sub-question being analyzed.
            search_results: Evidence documents for it.

        Returns:
            str: LLM analysis text; a fixed message when there are no
            results, or concatenated raw answers when the LLM call fails.
        """
        if not search_results:
            return "未找到相关信息"
        
        try:
            # Only the top three answers feed the analysis prompt.
            information = "\n".join(
                r['answer'] for r in search_results[:3] if r.get('answer')
            )
            
            prompt = self.reasoning_templates['analyze'].format(
                question=sub_question,
                information=information
            )
            return ollama_client.generate_text(prompt)
            
        except Exception as e:
            logger.warning(f"⚠️ 搜索结果分析失败: {e}")
            # Fallback: simple concatenation of the first two answers.
            return "\n".join(r.get('answer', '') for r in search_results[:2])
    
    def _synthesize_answer(self, 
                          question: str, 
                          all_evidence: List[Dict[str, Any]], 
                          reasoning_steps: List[ReasoningStep]) -> Tuple[str, float]:
        """
        Produce the final answer by prompting the LLM with the collected
        evidence plus the intermediate analysis results.

        Args:
            question: The original question.
            all_evidence: Evidence docs from all sub-question searches.
            reasoning_steps: Steps so far (analysis output is reused).

        Returns:
            Tuple[str, float]: (answer, confidence). On LLM failure, falls
            back to the best evidence answer (confidence 0.5) or a direct
            LLM answer to the question (confidence 0.3).
        """
        try:
            # Evidence answers (capped at five) ...
            information_parts = [
                f"证据: {evidence['answer']}"
                for evidence in all_evidence[:5]
                if evidence.get('answer')
            ]
            # ... followed by every analysis step's output.
            information_parts += [
                f"分析: {step.result}"
                for step in reasoning_steps
                if step.step_type == 'analyze' and step.result
            ]
            
            prompt = self.reasoning_templates['synthesize'].format(
                question=question,
                information="\n\n".join(information_parts)
            )
            answer = ollama_client.generate_text(prompt)
            
            confidence = self._calculate_synthesis_confidence(all_evidence, reasoning_steps)
            
            return answer, confidence
            
        except Exception as e:
            logger.warning(f"⚠️ 答案综合失败: {e}")
            # Fallback 1: best-scoring piece of evidence.
            if all_evidence:
                best_evidence = max(all_evidence, key=lambda x: x.get('score', 0))
                return best_evidence.get('answer', ''), 0.5
            # Fallback 2: ask the LLM directly without evidence.
            return ollama_client.answer_question(question), 0.3
    
    def _calculate_synthesis_confidence(self, 
                                      all_evidence: List[Dict[str, Any]], 
                                      reasoning_steps: List[ReasoningStep]) -> float:
        """
        计算综合置信度
        
        Args:
            all_evidence: 所有证据
            reasoning_steps: 推理步骤
        
        Returns:
            float: 置信度分数
        """
        confidence_factors = []
        
        # 证据质量因子
        if all_evidence:
            evidence_scores = [evidence.get('score', 0) for evidence in all_evidence]
            avg_evidence_score = sum(evidence_scores) / len(evidence_scores)
            confidence_factors.append(avg_evidence_score)
        
        # 推理步骤置信度
        step_confidences = [step.confidence for step in reasoning_steps if step.confidence > 0]
        if step_confidences:
            avg_step_confidence = sum(step_confidences) / len(step_confidences)
            confidence_factors.append(avg_step_confidence)
        
        # 证据数量因子
        evidence_count_factor = min(len(all_evidence) / 5, 1.0)
        confidence_factors.append(evidence_count_factor)
        
        # 推理步骤数量因子
        step_count_factor = min(len(reasoning_steps) / 5, 1.0)
        confidence_factors.append(step_count_factor)
        
        # 综合置信度
        if confidence_factors:
            return sum(confidence_factors) / len(confidence_factors)
        else:
            return 0.5
    
    def _self_reflect(self, question: str, answer: str) -> Dict[str, Any]:
        """
        Grade the answer with the LLM (accuracy / completeness /
        relevance, 0-10 each) and flag it for improvement when the
        average score falls below 7.

        Args:
            question: The question.
            answer: The synthesized answer.

        Returns:
            Dict[str, Any]: reflection text, the three scores, and a
            'needs_improvement' flag; neutral 7.0 defaults on failure.
        """
        neutral = {
            'reflection_text': '反思失败',
            'needs_improvement': False,
            'accuracy_score': 7.0,
            'completeness_score': 7.0,
            'relevance_score': 7.0
        }
        
        try:
            prompt = self.reasoning_templates['reflect'].format(
                question=question,
                answer=answer
            )
            reflection_text = ollama_client.generate_text(prompt)
            
            reflection = dict(neutral, reflection_text=reflection_text)
            
            # NOTE(review): fragile — this takes the first three numbers
            # found anywhere in the free-form LLM output as the three
            # scores, so list numbering ("1.", "2.") or a "0-10" echo can
            # be misread as scores. Preserved as-is.
            matches = re.findall(r'(\d+(?:\.\d+)?)', reflection_text)
            if len(matches) >= 3:
                accuracy, completeness, relevance = (float(v) for v in matches[:3])
                reflection['accuracy_score'] = accuracy
                reflection['completeness_score'] = completeness
                reflection['relevance_score'] = relevance
                
                # Flag for a rewrite when the mean grade is below 7
                reflection['needs_improvement'] = (accuracy + completeness + relevance) / 3 < 7.0
            
            return reflection
            
        except Exception as e:
            logger.warning(f"⚠️ 自我反思失败: {e}")
            return neutral
    
    def _improve_answer(self, question: str, answer: str, reflection: Dict[str, Any]) -> Tuple[str, float]:
        """
        Rewrite the answer according to the reflection feedback.

        Args:
            question: The question being answered.
            answer: The current answer.
            reflection: Output of _self_reflect.

        Returns:
            Tuple[str, float]: (improved answer, confidence derived from
            the reflection scores); (original answer, 0.6) on failure.
        """
        try:
            improvement_prompt = f"""
请根据以下反思意见改进答案：

问题: {question}
原答案: {answer}
反思意见: {reflection.get('reflection_text', '')}

请提供改进后的答案。
"""
            
            improved_answer = ollama_client.generate_text(improvement_prompt)
            
            # Confidence scales with the mean reflection score (out of 10)
            # plus a small bonus, capped at 1.0.
            score_keys = ('accuracy_score', 'completeness_score', 'relevance_score')
            avg_score = sum(reflection.get(k, 7) for k in score_keys) / 3
            improved_confidence = min(avg_score / 10 + 0.1, 1.0)
            
            return improved_answer, improved_confidence
            
        except Exception as e:
            logger.warning(f"⚠️ 答案改进失败: {e}")
            return answer, 0.6
    
    def _extract_keywords(self, text: str) -> List[str]:
        """
        提取关键词
        
        Args:
            text: 输入文本
        
        Returns:
            List[str]: 关键词列表
        """
        # 简单的关键词提取
        words = re.findall(r'\b\w+\b', text.lower())
        
        # 过滤停用词
        stop_words = {'的', '是', '在', '有', '和', '与', '或', '但', '如果', '因为', '所以', '什么', '怎么', '为什么', '哪里', '谁', '如何'}
        keywords = [word for word in words if word not in stop_words and len(word) > 1]
        
        return list(set(keywords))  # 去重
    
    def _extract_concepts(self, text: str) -> List[str]:
        """
        提取概念
        
        Args:
            text: 输入文本
        
        Returns:
            List[str]: 概念列表
        """
        concepts = []
        
        # 预定义的概念模式
        concept_patterns = [
            r'机器学习|深度学习|人工智能|神经网络|算法',
            r'数据库|系统|网络|服务器|软件|硬件',
            r'管理|运营|营销|财务|人力资源',
            r'教育|培训|学习|知识|技能',
            r'健康|医疗|治疗|诊断|药物'
        ]
        
        for pattern in concept_patterns:
            matches = re.findall(pattern, text)
            concepts.extend(matches)
        
        return list(set(concepts))  # 去重
    
    def _load_cache(self) -> None:
        """
        Load the persisted reasoning cache from ``self.cache_path``.

        A missing file is a no-op; a corrupt/unreadable file logs a
        warning and resets the cache to an empty dict.
        """
        if not os.path.exists(self.cache_path):
            return
        try:
            with open(self.cache_path, 'r', encoding='utf-8') as f:
                self.reasoning_cache = json.load(f)
            logger.info(f"📥 加载多步推理缓存: {len(self.reasoning_cache)} 条记录")
        except Exception as e:
            logger.warning(f"⚠️ 加载多步推理缓存失败: {e}")
            self.reasoning_cache = {}
    
    def _save_cache(self) -> None:
        """
        Persist the reasoning cache to ``self.cache_path`` as UTF-8 JSON.

        Failures are logged as warnings, never raised.
        """
        try:
            # Bug fix: os.makedirs('') raises FileNotFoundError, so a bare
            # filename cache_path (dirname == '') used to make every save
            # fail silently via the except branch. Only create the parent
            # directory when there is one.
            cache_dir = os.path.dirname(self.cache_path)
            if cache_dir:
                os.makedirs(cache_dir, exist_ok=True)
            with open(self.cache_path, 'w', encoding='utf-8') as f:
                json.dump(self.reasoning_cache, f, ensure_ascii=False, indent=2)
            logger.info(f"💾 保存多步推理缓存: {len(self.reasoning_cache)} 条记录")
        except Exception as e:
            logger.warning(f"⚠️ 保存多步推理缓存失败: {e}")


if __name__ == "__main__":
    # Smoke test for the multi-step reasoning retriever.
    # NOTE(review): the "llm" decomposition path and analysis/synthesis
    # call ollama_client, so running this requires a reachable backend.
    config = {
        "max_reasoning_steps": 5,
        "min_confidence_threshold": 0.6,
        "decomposition_method": "llm",
        "enable_self_reflection": True
    }
    
    retriever = MultiStepRetriever(config)
    
    # Tiny in-memory test corpus
    test_data = [
        {
            "id": 1,
            "question": "什么是机器学习？",
            "answer": "机器学习是人工智能的一个分支，通过算法让计算机从数据中学习。",
            "category": "AI"
        },
        {
            "id": 2,
            "question": "深度学习的原理是什么？",
            "answer": "深度学习使用多层神经网络来模拟人脑的学习过程。",
            "category": "AI"
        }
    ]
    
    # Initialize, then exercise one retrieval
    if retriever.initialize(test_data):
        # Run a retrieval
        result = retriever.retrieve("如何理解机器学习和深度学习的关系？")
        print(f"检索结果: {result.to_dict()}")
        
        # Performance statistics from the base retriever
        stats = retriever.get_performance_stats()
        print(f"性能统计: {stats}")