# Enhanced Retrieval System with Vector + BM25 + LLM Reranking
from pathlib import Path
from typing import List, Dict, Union
import time
import logging
from src.retrieval import VectorRetriever, BM25Retriever, LLMReranker

# Module-level logger; used to report non-fatal retrieval/rerank failures.
_log = logging.getLogger(__name__)

class EnhancedHybridRetriever:
    """Triple hybrid retriever: vector search + BM25 + LLM reranking.

    Pipeline: run vector and BM25 retrieval independently (each best-effort),
    merge and deduplicate the two candidate pools with weighted scoring, then
    rerank the merged pool with an LLM and return the top results.
    """

    def __init__(self, vector_db_dir: Path, bm25_db_dir: Path, documents_dir: Path):
        """Initialize the underlying retrievers and the LLM reranker.

        Args:
            vector_db_dir: Directory holding the vector database.
            bm25_db_dir: Directory holding the BM25 database.
            documents_dir: Directory holding the source documents.
        """
        self.vector_retriever = VectorRetriever(vector_db_dir, documents_dir)
        self.bm25_retriever = BM25Retriever(bm25_db_dir, documents_dir)
        self.reranker = LLMReranker()

    def retrieve_by_company_name(
        self,
        company_name: str,
        query: str,
        llm_reranking_sample_size: Union[int, None] = None,  # legacy/compatibility knob
        top_n: Union[int, None] = None,  # legacy/compatibility knob
        vector_top_n: int = 14,
        bm25_top_n: int = 14,
        final_top_n: int = 6,
        vector_weight: float = 0.6,
        bm25_weight: float = 0.4,
        llm_weight: float = 0.7,
        documents_batch_size: int = 10,
        return_parent_pages: bool = False
    ) -> List[Dict]:
        """Run the vector + BM25 + LLM-rerank hybrid retrieval pipeline.

        Args:
            company_name: Company whose documents are searched.
            query: Search query.
            llm_reranking_sample_size: Compatibility parameter; when given,
                raises each retriever's candidate count to at least half of it.
            top_n: Compatibility parameter; when given, overrides ``final_top_n``.
            vector_top_n: Number of vector-search candidates.
            bm25_top_n: Number of BM25 candidates.
            final_top_n: Number of results returned after reranking.
            vector_weight: Weight of the vector score in the merged score.
            bm25_weight: Weight of the BM25 score in the merged score.
            llm_weight: Weight passed through to the LLM reranker.
            documents_batch_size: Batch size for LLM reranking calls.
            return_parent_pages: Whether to return parent pages instead of chunks.

        Returns:
            Reranked results, at most ``final_top_n`` dicts. Returns an empty
            list when both retrievers fail or yield nothing. If LLM reranking
            fails, the merged (combined-score-sorted) candidates are used as-is.
        """
        # Map the legacy parameters onto the newer, finer-grained knobs.
        if llm_reranking_sample_size is not None:
            vector_top_n = max(vector_top_n, llm_reranking_sample_size // 2)
            bm25_top_n = max(bm25_top_n, llm_reranking_sample_size // 2)

        if top_n is not None:
            final_top_n = top_n

        # perf_counter() is monotonic — unlike time.time(), elapsed deltas can
        # never go negative when the wall clock is adjusted mid-call.
        t0 = time.perf_counter()

        # Step 1: vector retrieval (best effort — empty list on failure).
        print(f"[计时] [EnhancedHybridRetriever] 开始向量检索 (top-{vector_top_n})...")
        try:
            vector_results = self.vector_retriever.retrieve_by_company_name(
                company_name=company_name,
                query=query,
                top_n=vector_top_n,
                return_parent_pages=return_parent_pages
            )
        except Exception as e:
            _log.warning(f"向量检索失败: {e}")
            vector_results = []

        t1 = time.perf_counter()
        print(f"[计时] [EnhancedHybridRetriever] 向量检索耗时: {t1-t0:.2f} 秒，获得 {len(vector_results)} 个结果")

        # Step 2: BM25 retrieval (best effort as well).
        print(f"[计时] [EnhancedHybridRetriever] 开始BM25检索 (top-{bm25_top_n})...")
        try:
            bm25_results = self.bm25_retriever.retrieve_by_company_name(
                company_name=company_name,
                query=query,
                top_n=bm25_top_n,
                return_parent_pages=return_parent_pages
            )
        except Exception as e:
            _log.warning(f"BM25检索失败: {e}")
            bm25_results = []

        t2 = time.perf_counter()
        print(f"[计时] [EnhancedHybridRetriever] BM25检索耗时: {t2-t1:.2f} 秒，获得 {len(bm25_results)} 个结果")

        # Step 3: merge the two candidate pools and deduplicate by text.
        print("[计时] [EnhancedHybridRetriever] 开始合并检索结果...")
        combined_results = self._merge_and_deduplicate(
            vector_results, bm25_results, vector_weight, bm25_weight
        )

        t3 = time.perf_counter()
        print(f"[计时] [EnhancedHybridRetriever] 合并耗时: {t3-t2:.2f} 秒，合并后 {len(combined_results)} 个结果")

        # Nothing to rerank — both retrievers failed or returned nothing.
        if not combined_results:
            print("[警告] [EnhancedHybridRetriever] 合并后无检索结果")
            return []

        # Step 4: LLM reranking; on failure fall back to the merged order,
        # which is already sorted by combined score.
        print("[计时] [EnhancedHybridRetriever] 开始LLM重排序...")
        try:
            reranked_results = self.reranker.rerank_documents(
                query=query,
                documents=combined_results,
                documents_batch_size=documents_batch_size,
                llm_weight=llm_weight
            )
        except Exception as e:
            _log.warning(f"LLM重排失败，使用原始排序: {e}")
            reranked_results = combined_results

        t4 = time.perf_counter()
        print(f"[计时] [EnhancedHybridRetriever] LLM重排耗时: {t4-t3:.2f} 秒")
        print(f"[计时] [EnhancedHybridRetriever] 总耗时: {t4-t0:.2f} 秒")

        # Truncate to the requested result count.
        final_results = reranked_results[:final_top_n]
        print(f"[EnhancedHybridRetriever] 最终返回 {len(final_results)} 个结果")

        return final_results

    def _merge_and_deduplicate(
        self,
        vector_results: List[Dict],
        bm25_results: List[Dict],
        vector_weight: float = 0.6,
        bm25_weight: float = 0.4
    ) -> List[Dict]:
        """Merge vector and BM25 results, dedupe by text, and rescore.

        Each source's per-document score blends its raw score (70%) with a
        rank-position score (30%); the two source scores are then combined
        with ``vector_weight`` / ``bm25_weight``, and documents found by both
        sources get a 1.2x bonus.

        Args:
            vector_results: Results from the vector retriever.
            bm25_results: Results from the BM25 retriever.
            vector_weight: Weight of the vector-side score.
            bm25_weight: Weight of the BM25-side score.

        Returns:
            Merged results sorted by ``combined_score`` descending. Each dict
            gains ``vector_score``, ``bm25_score``, ``source`` ("vector",
            "bm25", or "both"), and ``combined_score`` keys.
        """
        # Deduplicate using the chunk text as the key.
        merged_docs = {}

        # Vector results: smaller distance is better, so invert to a
        # similarity-like score in (0, 1].
        for i, doc in enumerate(vector_results):
            text = doc.get('text', '')
            if text:
                # NOTE(review): assumes distance >= 0; a distance of exactly
                # -1.0 would divide by zero — confirm the retriever's contract.
                vector_score = 1.0 / (1.0 + doc.get('distance', 1.0))
                # Rank-position score: earlier results score higher.
                position_score = (len(vector_results) - i) / len(vector_results)
                final_score = vector_score * 0.7 + position_score * 0.3

                merged_docs[text] = {
                    **doc,
                    'vector_score': final_score,
                    'bm25_score': 0.0,
                    'source': 'vector'
                }

        # BM25 results: the raw BM25 score is used as-is (not normalized).
        for i, doc in enumerate(bm25_results):
            text = doc.get('text', '')
            if text:
                bm25_score = doc.get('score', 0.0)
                # Rank-position score, as above.
                position_score = (len(bm25_results) - i) / len(bm25_results)
                final_score = bm25_score * 0.7 + position_score * 0.3

                if text in merged_docs:
                    # Already seen via vector search — attach the BM25 score.
                    merged_docs[text]['bm25_score'] = final_score
                    merged_docs[text]['source'] = 'both'
                else:
                    # BM25-only document.
                    merged_docs[text] = {
                        **doc,
                        'vector_score': 0.0,
                        'bm25_score': final_score,
                        'source': 'bm25'
                    }

        # Weighted combination of the two source scores.
        final_results = []
        for doc in merged_docs.values():
            combined_score = (
                doc['vector_score'] * vector_weight +
                doc['bm25_score'] * bm25_weight
            )

            # Documents confirmed by both sources get a 20% bonus.
            if doc['source'] == 'both':
                combined_score *= 1.2

            doc['combined_score'] = combined_score
            final_results.append(doc)

        # Best combined score first.
        final_results.sort(key=lambda x: x['combined_score'], reverse=True)

        print(f"[合并统计] 向量结果: {len(vector_results)}, BM25结果: {len(bm25_results)}, "
              f"合并后: {len(final_results)}, 重叠: {sum(1 for d in final_results if d['source'] == 'both')}")

        return final_results

    def retrieve_all(self, company_name: str) -> List[Dict]:
        """Retrieve every document for the given company (full-context mode).

        Delegates to the vector retriever's full-corpus lookup.
        """
        return self.vector_retriever.retrieve_all(company_name)