from typing import Literal

from configs import app_config
from langchain_workflow.splitter import DocumentProcessor
from logger import logger


def get_compression_retriever(top_k: int, all_documents):
    """
    Build a contextual-compression retriever backed by a FAISS hybrid retriever.

    Args:
        top_k: Number of top documents to keep after cross-encoder reranking.
        all_documents: All candidate documents (List[Document]) to index for
            retrieval.

    Returns:
        ContextualCompressionRetriever: a retriever that first queries the
        FAISS hybrid retriever, then reranks the hits with a cross-encoder
        model and keeps only the top ``top_k``.
    """
    import os

    from langchain.retrievers import ContextualCompressionRetriever
    from langchain.retrievers.document_compressors.cross_encoder_rerank import (
        CrossEncoderReranker,
    )
    from langchain_community.cross_encoders import HuggingFaceCrossEncoder

    from langchain_workflow.rag.faiss_hybrid_retriever import get_faiss_hybrid_retriever

    # Build the FAISS hybrid retriever over the provided documents.
    retriever = get_faiss_hybrid_retriever(all_documents)

    # Point HuggingFace at a mirror so the rerank model can be downloaded on
    # first run. Use setdefault so an endpoint already configured in the
    # environment is not clobbered (the original unconditionally overwrote it).
    os.environ.setdefault("HF_ENDPOINT", "https://hf-mirror.com")

    # Cross-encoder reranker: scores (query, doc) pairs and keeps top_k.
    rerank_model = HuggingFaceCrossEncoder(model_name=app_config.RERANK_MODEL)
    compressor = CrossEncoderReranker(model=rerank_model, top_n=top_k)

    # Wrap the base retriever with the reranking compressor.
    return ContextualCompressionRetriever(
        base_compressor=compressor, base_retriever=retriever
    )

# Initialize the compression retriever at import time (default: keep top 7).
# NOTE(review): module-level side effect — building the FAISS index and
# loading/downloading the rerank model all happen on first import of this
# module; confirm that is intended.
# NOTE(review): "RERANCK_TOP_K" looks like a typo of "RERANK_TOP_K" — verify
# the actual attribute name in configs before renaming it here.
compression_retriever = get_compression_retriever(
    app_config.RERANCK_TOP_K, DocumentProcessor().get_document_pipeline(cache=True)
)


def rerank(question: str, compression_retriever) -> str:
    """
    Retrieve and rerank documents for *question*, formatted as one string.

    Args:
        question: The user query to retrieve and rerank documents for.
        compression_retriever: A retriever exposing ``invoke(question)`` that
            returns documents with a ``page_content`` attribute (e.g. a
            ContextualCompressionRetriever).

    Returns:
        The reranked documents concatenated as
        ``"policy1:\\n<content>\\npolicy2:\\n<content>\\n..."``; empty string
        when no documents are retrieved.
    """
    logger.info(f"开始重排序,问题: {question}")
    # Retrieve + rerank via the compression retriever.
    relevant_docs = compression_retriever.invoke(question)
    # Build the answer with a comprehension + join instead of quadratic `+=`
    # concatenation. (The original annotated the accumulator as Literal[""],
    # which was incorrect — it is a plain, mutated str.)
    parts = [
        f"policy{index}:\n{doc.page_content}\n"
        for index, doc in enumerate(relevant_docs, start=1)
    ]
    logger.info("重排序完成")
    return "".join(parts)



if __name__ == "__main__":
    # 获取所有文档,不使用缓存
    documents = DocumentProcessor().get_all_documents(cache=False)
