# Helper function for printing docs
from langchain.document_loaders import TextLoader
from langchain.document_transformers import EmbeddingsRedundantFilter
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms.openai import OpenAI
from langchain.retrievers import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import LLMChainExtractor, LLMChainFilter, EmbeddingsFilter, \
    DocumentCompressorPipeline
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores.faiss import FAISS
# --- Shared fixtures for every example below ---
# Load the State of the Union speech, split it into 1000-character chunks,
# index the chunks in a FAISS vector store, and expose a plain retriever plus
# a deterministic (temperature=0) LLM for the compression examples.
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
documents = TextLoader("../document_transformers/files/state_of_the_union.txt").load()
texts = text_splitter.split_documents(documents)
retriever = FAISS.from_documents(texts, OpenAIEmbeddings()).as_retriever()
llm = OpenAI(temperature=0)


def pretty_print_docs(docs):
    """Print each document's page_content, numbered from 1 and separated by a 100-dash rule."""
    separator = f"\n{'-' * 100}\n"
    rendered = (f"Document {idx}:\n\n{doc.page_content}" for idx, doc in enumerate(docs, start=1))
    print(separator.join(rendered))


def using_a_vanilla_vector_store_retriever():
    """Baseline: query the plain FAISS retriever with no compression.

    Given a sample question, the retriever returns one or two relevant
    documents alongside several irrelevant ones -- and even the relevant
    documents contain a lot of unrelated text. This motivates the
    contextual-compression variants demonstrated below.
    """
    query = "What did the president say about Kentanji Brown Jackson"
    pretty_print_docs(retriever.get_relevant_documents(query))


def add_contextual_compression_with_llm_chain():
    """Wrap the base retriever with a ContextualCompressionRetriever.

    An LLMChainExtractor iterates over the initially returned documents and
    extracts, from each one, only the content relevant to the query.
    """
    extractor = LLMChainExtractor.from_llm(llm)
    wrapped = ContextualCompressionRetriever(
        base_compressor=extractor, base_retriever=retriever
    )
    docs = wrapped.get_relevant_documents(
        "What did the president say about Kentanji Jacson Brown")
    pretty_print_docs(docs)


def use_compressors_filters():
    """Filter (rather than rewrite) retrieved documents with an LLM chain.

    LLMChainFilter is a slightly simpler but more robust compressor: it uses
    an LLM chain to decide which of the initially retrieved documents to drop
    and which to return, without manipulating document contents.
    """
    llm_filter = LLMChainFilter.from_llm(llm)
    wrapped = ContextualCompressionRetriever(
        base_compressor=llm_filter, base_retriever=retriever
    )
    pretty_print_docs(
        wrapped.get_relevant_documents(
            "What did the president say about Ketanji Jackson Brown"))


def use_embeddings_filter():
    """Filter retrieved documents by embedding similarity instead of an LLM.

    Making an extra LLM call per retrieved document is expensive and slow.
    EmbeddingsFilter offers a cheaper, faster option: it embeds the documents
    and the query, and returns only documents whose embeddings are similar
    enough to the query's.
    """
    embedder = OpenAIEmbeddings()
    similarity_filter = EmbeddingsFilter(embeddings=embedder, similarity_threshold=0.76)
    wrapped = ContextualCompressionRetriever(
        base_compressor=similarity_filter, base_retriever=retriever
    )
    pretty_print_docs(
        wrapped.get_relevant_documents(
            "What did the president say about Ketanji Jackson Brown"))


def use_stringing_compressors_with_document_transformers():
    """Chain several compressors and transformers with DocumentCompressorPipeline.

    Besides compressors, BaseDocumentTransformers can be added to the
    pipeline; they perform no contextual compression, only transformations on
    a set of documents. Here a TextSplitter re-splits documents into smaller
    pieces, an EmbeddingsRedundantFilter drops near-duplicate documents based
    on embedding similarity between documents, and an EmbeddingsFilter keeps
    only the documents similar enough to the query.
    """
    embedder = OpenAIEmbeddings()
    pipeline = DocumentCompressorPipeline(transformers=[
        CharacterTextSplitter(chunk_size=300, chunk_overlap=0, separator=". "),
        EmbeddingsRedundantFilter(embeddings=embedder),
        EmbeddingsFilter(embeddings=embedder, similarity_threshold=0.76),
    ])
    wrapped = ContextualCompressionRetriever(
        base_compressor=pipeline, base_retriever=retriever
    )
    pretty_print_docs(
        wrapped.get_relevant_documents(
            "What did the president say about Ketanji Jackson Brown"))


if __name__ == "__main__":
    # Run the pipeline demo; swap in any of the other example functions above.
    use_stringing_compressors_with_document_transformers()