# -*- coding: utf-8 -*-
"""
混合检索器
结合向量检索和关键词检索的优势
"""

from typing import Dict, List, Any, Tuple
from loguru import logger
import numpy as np

from .base_retriever import BaseRetriever, RetrievalResult
from .vector_retriever import VectorRetriever
from .keyword_retriever import KeywordRetriever
from ..ollama_client import ollama_client


class HybridRetriever(BaseRetriever):
    """Hybrid retriever: fuses vector (semantic) and keyword retrieval results."""
    
    def __init__(self, config: Dict[str, Any] = None):
        """
        Initialize the hybrid retriever and its two sub-retrievers.
        
        Args:
            config: Optional configuration dict. Recognized keys:
                - vector_weight / keyword_weight: fusion weights (defaults 0.6 / 0.4).
                  NOTE(review): the weights are not validated to sum to 1.0 —
                  confirm whether normalization is expected upstream.
                - fusion_method: "weighted_sum" | "rrf" | "comb_sum".
                - min_confidence: threshold below which LLM answer enhancement kicks in.
                - use_llm_rerank: enable LLM-based answer enhancement.
                - vector_config / keyword_config: forwarded to the sub-retrievers.
        """
        super().__init__("HybridRetriever", config)
        
        # Fusion parameters (read from self.config, which the base class populates)
        self.vector_weight = self.config.get("vector_weight", 0.6)
        self.keyword_weight = self.config.get("keyword_weight", 0.4)
        self.fusion_method = self.config.get("fusion_method", "weighted_sum")  # weighted_sum, rrf, comb_sum
        self.min_confidence = self.config.get("min_confidence", 0.3)
        self.use_llm_rerank = self.config.get("use_llm_rerank", True)
        
        # Sub-retriever configs; the defaults below apply only when the key
        # is entirely absent from `config` (no per-key merging is done).
        vector_config = self.config.get("vector_config", {
            "embedding_method": "tfidf",
            "similarity_threshold": 0.1,
            "cache_path": "data/hybrid_vector_cache.pkl"
        })
        
        keyword_config = self.config.get("keyword_config", {
            "match_threshold": 0.2,
            "use_fuzzy_match": True,
            "cache_path": "data/hybrid_keyword_cache.json"
        })
        
        # Instantiate the two sub-retrievers this class delegates to
        self.vector_retriever = VectorRetriever(vector_config)
        self.keyword_retriever = KeywordRetriever(keyword_config)
        
        logger.info(f"🔍 混合检索器初始化完成，融合方法: {self.fusion_method}")
        logger.info(f"📊 权重配置 - 向量: {self.vector_weight}, 关键词: {self.keyword_weight}")
    
    def initialize(self, data: List[Dict[str, Any]]) -> bool:
        """
        Initialize both sub-retrievers with the same training data.
        
        Args:
            data: Training/corpus records to index.
        
        Returns:
            bool: True only if BOTH sub-retrievers initialized successfully.
        """
        try:
            logger.info(f"📊 开始初始化混合检索器，数据量: {len(data)}")
            
            # Initialize the vector retriever
            logger.info("🔧 初始化向量检索器...")
            vector_success = self.vector_retriever.initialize(data)
            
            # Initialize the keyword retriever (runs even if the vector one failed,
            # so both statuses can be reported in the error log below)
            logger.info("🔧 初始化关键词检索器...")
            keyword_success = self.keyword_retriever.initialize(data)
            
            if vector_success and keyword_success:
                self.is_initialized = True
                logger.info("✅ 混合检索器初始化成功")
                return True
            else:
                logger.error(f"❌ 混合检索器初始化失败 - 向量: {vector_success}, 关键词: {keyword_success}")
                return False
                
        except Exception as e:
            logger.error(f"❌ 混合检索器初始化异常: {e}")
            return False
    
    def retrieve(self, question: str, top_k: int = 5) -> RetrievalResult:
        """
        Run hybrid retrieval: query both sub-retrievers, fuse, then answer.
        
        Args:
            question: The query question.
            top_k: Number of fused documents to keep.
        
        Returns:
            RetrievalResult: Fused answer with confidence, timing and metadata.
            On failure or when uninitialized, an empty result (answer="",
            confidence=0.0) is returned instead of raising.
        """
        if not self.is_initialized:
            logger.error("❌ 混合检索器未初始化")
            return RetrievalResult(question, "", 0.0, 0.0)
        
        try:
            # Wrap the whole pipeline so _measure_time (base-class helper,
            # presumably returns (result, elapsed) — confirm) times it as a unit.
            def _retrieve_internal():
                # Vector retrieval; over-fetch 2x top_k so fusion has candidates
                vector_result = self.vector_retriever.retrieve(question, top_k * 2)
                
                # Keyword retrieval, same over-fetch
                keyword_result = self.keyword_retriever.retrieve(question, top_k * 2)
                
                # Fuse the two ranked lists down to top_k
                fused_docs = self._fuse_results(
                    vector_result.retrieved_docs,
                    keyword_result.retrieved_docs,
                    top_k
                )
                
                # Produce the final answer (best doc, optionally LLM-enhanced)
                answer, confidence = self._generate_final_answer(
                    question, fused_docs, vector_result, keyword_result
                )
                
                return answer, confidence, fused_docs, vector_result, keyword_result
            
            result_data, response_time = self._measure_time(_retrieve_internal)
            answer, confidence, fused_docs, vector_result, keyword_result = result_data
            
            # Count any non-empty answer as a success
            # (success_count presumably maintained by BaseRetriever — confirm)
            if answer:
                self.success_count += 1
            
            return RetrievalResult(
                question=question,
                answer=answer,
                confidence=confidence,
                response_time=response_time,
                retrieved_docs=fused_docs,
                metadata={
                    'method': 'hybrid',
                    'fusion_method': self.fusion_method,
                    'vector_weight': self.vector_weight,
                    'keyword_weight': self.keyword_weight,
                    'vector_results': len(vector_result.retrieved_docs),
                    'keyword_results': len(keyword_result.retrieved_docs),
                    'fused_results': len(fused_docs),
                    'vector_time': vector_result.response_time,
                    'keyword_time': keyword_result.response_time
                }
            )
            
        except Exception as e:
            logger.error(f"❌ 混合检索失败: {e}")
            return RetrievalResult(question, "", 0.0, 0.0)
    
    def _fuse_results(self, 
                     vector_docs: List[Dict], 
                     keyword_docs: List[Dict], 
                     top_k: int) -> List[Dict]:
        """
        Dispatch to the configured fusion strategy.
        
        Args:
            vector_docs: Ranked docs from the vector retriever.
            keyword_docs: Ranked docs from the keyword retriever.
            top_k: Number of fused results to return.
        
        Returns:
            List[Dict]: Fused, re-ranked documents (at most top_k).
        """
        if self.fusion_method == "weighted_sum":
            return self._weighted_sum_fusion(vector_docs, keyword_docs, top_k)
        elif self.fusion_method == "rrf":
            return self._reciprocal_rank_fusion(vector_docs, keyword_docs, top_k)
        elif self.fusion_method == "comb_sum":
            return self._comb_sum_fusion(vector_docs, keyword_docs, top_k)
        else:
            # Unknown method: warn and fall back to weighted sum
            logger.warning(f"⚠️ 未知的融合方法: {self.fusion_method}，使用默认方法")
            return self._weighted_sum_fusion(vector_docs, keyword_docs, top_k)
    
    def _weighted_sum_fusion(self, 
                           vector_docs: List[Dict], 
                           keyword_docs: List[Dict], 
                           top_k: int) -> List[Dict]:
        """
        Weighted-sum fusion: score = weight * raw_score * 1/(rank).
        
        Documents appearing in both lists accumulate both contributions,
        keyed by the doc's 'id' field (docs without an 'id' are dropped).
        
        Args:
            vector_docs: Ranked docs from the vector retriever.
            keyword_docs: Ranked docs from the keyword retriever.
            top_k: Number of fused results to return.
        
        Returns:
            List[Dict]: Copies of the winning docs annotated with fusion_score,
            per-source scores and per-source ranks (None when absent).
        """
        # Accumulator keyed by doc id
        all_docs = {}
        
        # Fold in vector results
        for i, doc in enumerate(vector_docs):
            doc_id = doc.get('id')
            if doc_id is not None:
                vector_score = doc.get('similarity', 0.0)
                # Positional decay: earlier ranks contribute more (1/rank)
                position_weight = 1.0 / (i + 1)
                weighted_score = self.vector_weight * vector_score * position_weight
                
                all_docs[doc_id] = {
                    'doc': doc,
                    'vector_score': vector_score,
                    'keyword_score': 0.0,
                    'final_score': weighted_score,
                    'vector_rank': i + 1,
                    'keyword_rank': None
                }
        
        # Fold in keyword results
        for i, doc in enumerate(keyword_docs):
            doc_id = doc.get('id')
            if doc_id is not None:
                keyword_score = doc.get('score', 0.0)
                position_weight = 1.0 / (i + 1)
                weighted_score = self.keyword_weight * keyword_score * position_weight
                
                if doc_id in all_docs:
                    # Doc seen by both retrievers: add the keyword contribution
                    all_docs[doc_id]['keyword_score'] = keyword_score
                    all_docs[doc_id]['final_score'] += weighted_score
                    all_docs[doc_id]['keyword_rank'] = i + 1
                else:
                    # Keyword-only doc
                    all_docs[doc_id] = {
                        'doc': doc,
                        'vector_score': 0.0,
                        'keyword_score': keyword_score,
                        'final_score': weighted_score,
                        'vector_rank': None,
                        'keyword_rank': i + 1
                    }
        
        # Sort by fused score and keep top-k
        sorted_docs = sorted(all_docs.values(), key=lambda x: x['final_score'], reverse=True)
        
        fused_results = []
        for item in sorted_docs[:top_k]:
            doc = item['doc'].copy()  # shallow copy: do not mutate sub-retriever output
            doc['fusion_score'] = item['final_score']
            doc['vector_score'] = item['vector_score']
            doc['keyword_score'] = item['keyword_score']
            doc['vector_rank'] = item['vector_rank']
            doc['keyword_rank'] = item['keyword_rank']
            fused_results.append(doc)
        
        return fused_results
    
    def _reciprocal_rank_fusion(self, 
                              vector_docs: List[Dict], 
                              keyword_docs: List[Dict], 
                              top_k: int, 
                              k: int = 60) -> List[Dict]:
        """
        Reciprocal Rank Fusion: score = weight * 1/(k + rank) per source.
        
        Args:
            vector_docs: Ranked docs from the vector retriever.
            keyword_docs: Ranked docs from the keyword retriever.
            top_k: Number of fused results to return.
            k: RRF damping constant (60 is the conventional default).
        
        Returns:
            List[Dict]: Copies of the winning docs annotated with rrf_score
            and per-source ranks (None when absent).
        """
        all_docs = {}
        
        # Fold in vector results (ranks are 1-based)
        for rank, doc in enumerate(vector_docs, 1):
            doc_id = doc.get('id')
            if doc_id is not None:
                rrf_score = 1.0 / (k + rank)
                all_docs[doc_id] = {
                    'doc': doc,
                    'rrf_score': rrf_score * self.vector_weight,
                    'vector_rank': rank,
                    'keyword_rank': None
                }
        
        # Fold in keyword results
        for rank, doc in enumerate(keyword_docs, 1):
            doc_id = doc.get('id')
            if doc_id is not None:
                rrf_score = 1.0 / (k + rank)
                
                if doc_id in all_docs:
                    all_docs[doc_id]['rrf_score'] += rrf_score * self.keyword_weight
                    all_docs[doc_id]['keyword_rank'] = rank
                else:
                    all_docs[doc_id] = {
                        'doc': doc,
                        'rrf_score': rrf_score * self.keyword_weight,
                        'vector_rank': None,
                        'keyword_rank': rank
                    }
        
        # Sort by RRF score and keep top-k
        sorted_docs = sorted(all_docs.values(), key=lambda x: x['rrf_score'], reverse=True)
        
        fused_results = []
        for item in sorted_docs[:top_k]:
            doc = item['doc'].copy()
            doc['rrf_score'] = item['rrf_score']
            doc['vector_rank'] = item['vector_rank']
            doc['keyword_rank'] = item['keyword_rank']
            fused_results.append(doc)
        
        return fused_results
    
    def _comb_sum_fusion(self, 
                        vector_docs: List[Dict], 
                        keyword_docs: List[Dict], 
                        top_k: int) -> List[Dict]:
        """
        CombSUM fusion over min-max normalized per-source scores.
        
        NOTE(review): normalize_scores writes 'normalized_*' keys directly onto
        the input doc dicts (mutates sub-retriever output), and when all scores
        in a list are equal (incl. a single-doc list) every normalized score
        becomes 0.0 because score_range falls back to 1.0 — confirm intended.
        
        Args:
            vector_docs: Ranked docs from the vector retriever.
            keyword_docs: Ranked docs from the keyword retriever.
            top_k: Number of fused results to return.
        
        Returns:
            List[Dict]: Copies of the winning docs annotated with comb_score
            and the normalized per-source scores.
        """
        all_docs = {}
        
        # Min-max normalize `score_key` in place, storing 'normalized_<key>'
        def normalize_scores(docs, score_key):
            if not docs:
                return
            scores = [doc.get(score_key, 0.0) for doc in docs]
            max_score = max(scores) if scores else 1.0
            min_score = min(scores) if scores else 0.0
            score_range = max_score - min_score if max_score > min_score else 1.0
            
            for doc in docs:
                original_score = doc.get(score_key, 0.0)
                normalized_score = (original_score - min_score) / score_range
                doc[f'normalized_{score_key}'] = normalized_score
        
        # Normalize each source onto [0, 1] before combining
        normalize_scores(vector_docs, 'similarity')
        normalize_scores(keyword_docs, 'score')
        
        # Fold in vector results
        for doc in vector_docs:
            doc_id = doc.get('id')
            if doc_id is not None:
                normalized_score = doc.get('normalized_similarity', 0.0)
                all_docs[doc_id] = {
                    'doc': doc,
                    'comb_score': self.vector_weight * normalized_score,
                    'vector_score': normalized_score,
                    'keyword_score': 0.0
                }
        
        # Fold in keyword results
        for doc in keyword_docs:
            doc_id = doc.get('id')
            if doc_id is not None:
                normalized_score = doc.get('normalized_score', 0.0)
                
                if doc_id in all_docs:
                    all_docs[doc_id]['comb_score'] += self.keyword_weight * normalized_score
                    all_docs[doc_id]['keyword_score'] = normalized_score
                else:
                    all_docs[doc_id] = {
                        'doc': doc,
                        'comb_score': self.keyword_weight * normalized_score,
                        'vector_score': 0.0,
                        'keyword_score': normalized_score
                    }
        
        # Sort by combined score and keep top-k
        sorted_docs = sorted(all_docs.values(), key=lambda x: x['comb_score'], reverse=True)
        
        fused_results = []
        for item in sorted_docs[:top_k]:
            doc = item['doc'].copy()
            doc['comb_score'] = item['comb_score']
            doc['normalized_vector_score'] = item['vector_score']
            doc['normalized_keyword_score'] = item['keyword_score']
            fused_results.append(doc)
        
        return fused_results
    
    def _generate_final_answer(self, 
                             question: str, 
                             fused_docs: List[Dict],
                             vector_result: RetrievalResult,
                             keyword_result: RetrievalResult) -> Tuple[str, float]:
        """
        Produce the final answer from the fused ranking.
        
        Strategy: take the top fused doc's 'answer'; if confidence is below
        min_confidence and LLM rerank is enabled, ask the LLM with the top-3
        answers as context and keep the longer of the two answers.
        
        Args:
            question: Original question.
            fused_docs: Fused, ranked documents.
            vector_result: Raw vector retrieval result (for confidence).
            keyword_result: Raw keyword retrieval result (for confidence).
        
        Returns:
            Tuple[str, float]: (answer, confidence).
        """
        if not fused_docs:
            # Nothing retrieved: fall back to a direct LLM answer at low confidence
            answer = ollama_client.answer_question(question)
            return answer, 0.3
        
        # Best fused doc supplies the candidate answer
        best_doc = fused_docs[0]
        answer = best_doc.get('answer', '')
        
        # Confidence from fusion score + sub-retriever confidences
        confidence = self._calculate_confidence(best_doc, vector_result, keyword_result)
        
        # Low confidence + rerank enabled: let the LLM answer with retrieved context
        if confidence < self.min_confidence and self.use_llm_rerank:
            context = "\n".join([doc.get('answer', '') for doc in fused_docs[:3]])
            llm_answer = ollama_client.answer_question(question, context)
            
            # Length is used as a crude "more informative" heuristic here
            if llm_answer and len(llm_answer) > len(answer):
                answer = llm_answer
                confidence = min(confidence + 0.3, 1.0)
        
        return answer, confidence
    
    def _calculate_confidence(self, 
                            best_doc: Dict,
                            vector_result: RetrievalResult,
                            keyword_result: RetrievalResult) -> float:
        """
        Compute the answer confidence in [0, 1].
        
        Final value = average of (a) a base confidence derived from whichever
        fusion score key the active fusion method produced, and (b) the
        weight-combined confidences of the two sub-retrievers.
        
        Args:
            best_doc: Top-ranked fused document.
            vector_result: Vector retrieval result.
            keyword_result: Keyword retrieval result.
        
        Returns:
            float: Confidence score, clamped to at most 1.0.
        """
        # Base confidence from the fusion score (key depends on fusion method)
        base_confidence = 0.0
        
        if 'fusion_score' in best_doc:
            base_confidence = min(best_doc['fusion_score'], 1.0)
        elif 'rrf_score' in best_doc:
            base_confidence = min(best_doc['rrf_score'] * 10, 1.0)  # RRF scores are small; scale up
        elif 'comb_score' in best_doc:
            base_confidence = min(best_doc['comb_score'], 1.0)
        
        # Sub-retriever confidences
        vector_confidence = vector_result.confidence
        keyword_confidence = keyword_result.confidence
        
        # Weighted average of the two sources
        combined_confidence = (
            self.vector_weight * vector_confidence + 
            self.keyword_weight * keyword_confidence
        )
        
        # Blend base and combined evenly
        final_confidence = (base_confidence + combined_confidence) / 2
        
        return min(final_confidence, 1.0)
    
    def get_performance_stats(self) -> Dict[str, Any]:
        """
        Return performance statistics, including per-sub-retriever stats.
        
        Returns:
            Dict: Base-class stats extended with vector_stats, keyword_stats,
            fusion_method and the configured weights.
        """
        base_stats = super().get_performance_stats()
        
        # Attach each sub-retriever's own stats plus fusion configuration
        base_stats['vector_stats'] = self.vector_retriever.get_performance_stats()
        base_stats['keyword_stats'] = self.keyword_retriever.get_performance_stats()
        base_stats['fusion_method'] = self.fusion_method
        base_stats['weights'] = {
            'vector': self.vector_weight,
            'keyword': self.keyword_weight
        }
        
        return base_stats


if __name__ == "__main__":
    # Smoke-test the hybrid retriever against a tiny in-memory corpus.
    demo_config = {
        "vector_weight": 0.6,
        "keyword_weight": 0.4,
        "fusion_method": "weighted_sum",
        "use_llm_rerank": True,
    }
    demo_retriever = HybridRetriever(demo_config)

    # Sample corpus as (id, question, answer, category, keywords) tuples,
    # expanded into the record dicts the retriever expects.
    samples = [
        (
            1,
            "什么是机器学习？",
            "机器学习是人工智能的一个分支，通过算法让计算机从数据中学习。",
            "AI",
            ["机器学习", "人工智能", "算法"],
        ),
        (
            2,
            "深度学习的原理是什么？",
            "深度学习使用多层神经网络来模拟人脑的学习过程。",
            "AI",
            ["深度学习", "神经网络", "学习"],
        ),
    ]
    corpus = [
        {"id": rid, "question": q, "answer": a, "category": cat, "keywords": kw}
        for rid, q, a, cat, kw in samples
    ]

    # Only exercise retrieval if initialization succeeded.
    if demo_retriever.initialize(corpus):
        demo_result = demo_retriever.retrieve("机器学习是什么？")
        print(f"检索结果: {demo_result.to_dict()}")

        demo_stats = demo_retriever.get_performance_stats()
        print(f"性能统计: {demo_stats}")