# Pipeline: ask a question ---> load documents ---> split documents ---> embed into vectors
# ---> store in a vector database ---> retrieve matching chunks for the question
# ---> build a prompt containing the retrieved context ---> pass it to the LLM ---> the LLM answers.

# Retrieval - retrieval is the core of our Retrieval-Augmented Generation (RAG) pipeline.

import os
import openai
import sys
from langchain.vectorstores import Chroma
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.retrievers.self_query.base import SelfQueryRetriever
from langchain.chains.query_constructor.base import AttributeInfo
from langchain.retrievers import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import LLMChainExtractor

sys.path.append('../..')

# SECURITY FIX: an OpenAI API key was previously hard-coded here (and must be
# considered leaked — revoke it). Never commit secrets to source control.
# Read the key from the environment instead: set OPENAI_API_KEY in your shell
# or load it from a .env file before running this script.
api_key = os.environ.get('OPENAI_API_KEY', '')

# Directory where the persisted Chroma vector store lives.
persist_directory = 'docs/chroma/cs229_lectures/'


# Diversity problem: compare plain similarity search with maximal marginal
# relevance (MMR) search.
def retrieval_example_1():
    """Run one query with similarity search and with MMR, printing snippets.

    Prints the first 100 characters of the top two hits from each search
    strategy so the redundancy/diversity difference is visible.
    """
    # Open the persisted Chroma store using OpenAI embeddings.
    vectordb = Chroma(
        persist_directory=persist_directory,
        embedding_function=OpenAIEmbeddings(),
    )
    question = "what did they say about matlab?"

    # Plain similarity search: top-k nearest neighbours, results may be redundant.
    for doc in vectordb.similarity_search(question, k=3)[:2]:
        print(doc.page_content[:100])

    # MMR search: trades off relevance against diversity among the results.
    for doc in vectordb.max_marginal_relevance_search(question, k=3)[:2]:
        print(doc.page_content[:100])


# Specificity problem: narrow the search with a metadata filter.
def retrieval_example_2():
    """Similarity search restricted to a single lecture via a metadata filter.

    Prints the metadata of each hit so one can confirm every result comes
    from the filtered lecture.
    """
    # Open the persisted Chroma store using OpenAI embeddings.
    vectordb = Chroma(
        persist_directory=persist_directory,
        embedding_function=OpenAIEmbeddings(),
    )

    question = "what did they say about regression in the third lecture?"
    # The `filter` argument restricts results to chunks whose metadata matches.
    results = vectordb.similarity_search(
        question,
        k=3,
        filter={"source": "docs/cs229_lectures/MachineLearning-Lecture03.pdf"},
    )
    # All hits should now come from Lecture 3.
    for result in results:
        print(result.metadata)


# Specificity problem: let the LLM infer metadata filters itself with a
# self-query retriever.
def retrieval_example_3():
    """Use SelfQueryRetriever so the LLM derives metadata filters from the query.

    Prints the metadata of each retrieved chunk.
    """
    # Open the persisted Chroma store using OpenAI embeddings.
    vectordb = Chroma(
        persist_directory=persist_directory,
        embedding_function=OpenAIEmbeddings(),
    )
    # Deterministic LLM (temperature=0) used to construct the structured query.
    llm = OpenAI(api_key=api_key, temperature=0)

    # Attributes (name, description, type) the LLM is allowed to filter on.
    metadata_field_info = [
        AttributeInfo(
            name="source",
            description="The lecture the chunk is from, should be one of `docs/cs229_lectures/MachineLearning-Lecture01.pdf`, `docs/cs229_lectures/MachineLearning-Lecture02.pdf`, or `docs/cs229_lectures/MachineLearning-Lecture03.pdf`",
            type="string",
        ),
        AttributeInfo(
            name="page",
            description="The page from the lecture",
            type="integer",
        ),
    ]
    # High-level description of what is being retrieved (lecture notes here).
    document_content_description = "Lecture notes"

    # Build the self-query retriever over the vector store.
    retriever = SelfQueryRetriever.from_llm(
        llm,
        vectordb,
        document_content_description,
        metadata_field_info,
        verbose=True,
    )
    question = "what did they say about regression in the third lecture?"
    for doc in retriever.get_relevant_documents(question):
        print(doc.metadata)


def pretty_print_docs(docs):
    """Print each document's page content, separated by a dashed divider line."""
    divider = f"\n{'-' * 100}\n"
    sections = (
        f"Document {index}:\n\n" + doc.page_content
        for index, doc in enumerate(docs, start=1)
    )
    print(divider.join(sections))


# Another technique: contextual compression of the retrieved chunks.
def retrieval_example_4():
    """Compress retrieved chunks with an LLM extractor before printing them."""
    # Open the persisted Chroma store using OpenAI embeddings.
    vectordb = Chroma(
        persist_directory=persist_directory,
        embedding_function=OpenAIEmbeddings(),
    )
    # Deterministic LLM (temperature=0) that extracts only the relevant passages.
    llm = OpenAI(api_key=api_key, temperature=0)
    compressor = LLMChainExtractor.from_llm(llm)

    compression_retriever = ContextualCompressionRetriever(
        base_compressor=compressor,
        # The default retriever search type is plain similarity search;
        # "mmr" adds diversity to the retrieved chunks.
        base_retriever=vectordb.as_retriever(search_type="mmr"),
    )
    question = "what did they say about matlab?"
    compressed = compression_retriever.get_relevant_documents(question)
    pretty_print_docs(compressed)


if __name__ == '__main__':
    # Uncomment the example you want to run; only example 4 runs by default.
    # retrieval_example_1()
    # retrieval_example_2()
    # retrieval_example_3()
    retrieval_example_4()
