
# pip install --upgrade --quiet  faiss_cpu -i https://pypi.tuna.tsinghua.edu.cn/simple/
# pip install flashrank -i https://pypi.tuna.tsinghua.edu.cn/simple/

def pretty_print_docs(docs):
    """Print each document's content and metadata, separated by a dashed rule.

    Each entry is rendered as a numbered header, the document body, and its
    metadata dict; entries are joined by a 100-character divider line.
    """
    divider = f"\n{'-' * 100}\n"
    rendered = [
        f"Document {idx + 1}:\n\n{doc.page_content}\nMetadata: {doc.metadata}"
        for idx, doc in enumerate(docs)
    ]
    print(divider.join(rendered))

from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import FAISS
# from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_ollama import OllamaEmbeddings

# Module-level pipeline (runs on import): load the source text, split it into
# overlapping chunks, and index the chunks in a FAISS vector store using
# Ollama embeddings.
documents = TextLoader( "../../agent/new_stu_syhg.txt").load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=200, chunk_overlap=50)
texts = text_splitter.split_documents(documents)
# Tag each chunk with a stable integer id so reranked/compressed results can be
# traced back to their original chunk (printed via doc.metadata["id"] below).
for idx, text in enumerate(texts):
    text.metadata["id"] = idx

# embedding = OpenAIEmbeddings(model="text-embedding-ada-002")
embedding = OllamaEmbeddings(model="bge-m3:latest")
faiss = FAISS.from_documents(texts, embedding)

def normal_retrieve(query):
    """Fetch the top-5 most similar chunks for *query* and print them."""
    top_k_retriever = faiss.as_retriever(search_kwargs={"k": 5})
    results = top_k_retriever.invoke(query)
    pretty_print_docs(results)

from langchain.retrievers import ContextualCompressionRetriever
from langchain_community.document_compressors import FlashrankRerank
from langchain_ollama import ChatOllama
# Shared chat model, used by LLMChainExtractor in llmchain_extractor_retrieve
# (and by the commented-out RetrievalQA chain). reasoning=False disables the
# model's thinking output.
llm = ChatOllama(model="qwen3:8b", temperature=0.5, reasoning=False)

def flashrerank_retrieve(query):
    """Retrieve top-5 chunks for *query*, rerank them with FlashRank, and
    print each surviving doc's id and relevance score.

    NOTE: FlashrankRerank downloads the ms-marco-MultiBERT-L-12 model from
    Hugging Face on first use.
    """
    retriever = faiss.as_retriever(search_kwargs={"k": 5})
    # Reranking: push the most relevant documents to the front using Flashrank,
    # keeping only the top 3.
    compressor = FlashrankRerank(top_n=3)
    # Wrap the base retriever so its results are reranked on the way out.
    compression_retriever = ContextualCompressionRetriever(
        base_compressor=compressor, base_retriever=retriever
    )
    # BUG FIX: previously invoked with a hard-coded English question
    # ("What did the president say about Ketanji Jackson Brown"),
    # silently ignoring the *query* argument.
    compressed_docs = compression_retriever.invoke(query)
    for doc in compressed_docs:
        print(doc.metadata["id"], doc.metadata["relevance_score"])

    # Optional: QA chain over the reranked retriever.
    # from langchain.chains import RetrievalQA
    # chain = RetrievalQA.from_chain_type(llm=llm, retriever=compression_retriever)
    # chain.invoke(query)


from langchain.retrievers.document_compressors import LLMChainExtractor

# Contextual compression: focus on the core content, reduce token cost.
def llmchain_extractor_retrieve(query):
    """Retrieve chunks for *query*, have the LLM extract only the
    query-relevant portions of each chunk, then print per-document
    original vs. compressed lengths alongside the compressed text."""
    # 1. Base retriever: over-fetch candidates first, compress afterwards.
    candidate_retriever = faiss.as_retriever(search_kwargs={"k": 5})
    # 2. LLM-backed extractor that trims each doc to its relevant parts.
    extractor = LLMChainExtractor.from_llm(llm=llm)
    # 3. Combine base retriever and extractor into one retriever.
    compressing_retriever = ContextualCompressionRetriever(
        base_compressor=extractor, base_retriever=candidate_retriever
    )
    # 4. Run the query and report compression statistics per document.
    compressed = compressing_retriever.invoke(query)
    print(f"对查询 '{query}' 的ContextualCompressionRetriever 检索结果:")
    for index, doc in enumerate(compressed):
        before = len(doc.metadata.get('original_content', doc.page_content))
        after = len(doc.page_content)
        print(f"文档 {index + 1}(原始长度: {before}, 压缩后长度: {after}):")
        print(doc.page_content)
        print("-" * 30)


if __name__ == "__main__":
    # Demo query (Chinese): "How does the school manage its students?"
    query = "学校是如何管理学生的？"
    # normal_retrieve(query)
    llmchain_extractor_retrieve(query)
