from langchain.retrievers import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import LLMChainExtractor
from langchain_openai import AzureChatOpenAI
from langchain_community.vectorstores import Chroma
from langchain_openai.embeddings import AzureOpenAIEmbeddings  # 导入嵌入模型
from tool import get_azure_endpoint,get_api_version,get_api_key
def pretty_print_docs(docs):
    """Print each document's page_content, numbered from 1 and separated by a dashed rule."""
    separator = f"\n{'-' * 100}\n"
    rendered = [
        f"Document {index}:\n\n{doc.page_content}"
        for index, doc in enumerate(docs, start=1)
    ]
    print(separator.join(rendered))

if __name__ == '__main__':
    # Chat model used by the compressor to extract query-relevant passages
    # from the retrieved documents.
    llm = AzureChatOpenAI(
        azure_endpoint=get_azure_endpoint().rstrip('/'),  # strip trailing slash; keep base URL only
        azure_deployment="gpt-4o-mini",
        openai_api_version=get_api_version(),
        openai_api_key=get_api_key(),
        openai_api_type="azure",
    )
    # Embedding model backing the persisted Chroma vector store.
    embedding = AzureOpenAIEmbeddings(
        azure_endpoint=get_azure_endpoint().rstrip('/'),  # strip trailing slash; keep base URL only
        model="text-embedding-3-small",
        api_key=get_api_key(),
        api_version=get_api_version(),
    )
    vectordb_chinese = Chroma(
        persist_directory="./docs/chroma/matplotlib",
        embedding_function=embedding,
    )
    # LLM-based compressor: trims each retrieved document down to the parts
    # relevant to the query before it is returned.
    compressor = LLMChainExtractor.from_llm(llm)

    question_chinese = "Matplotlib是什么?"
    # Run the same compressed retrieval twice to compare plain similarity
    # search against MMR (maximal marginal relevance) retrieval.
    for search_type in ("similarity", "mmr"):
        compression_retriever_chinese = ContextualCompressionRetriever(
            base_compressor=compressor,
            base_retriever=vectordb_chinese.as_retriever(search_type=search_type),
        )
        # .invoke() is the supported Runnable API; .get_relevant_documents()
        # is deprecated in current LangChain releases.
        compressed_docs_chinese = compression_retriever_chinese.invoke(question_chinese)
        pretty_print_docs(compressed_docs_chinese)