from langchain_community.vectorstores import Chroma
from langchain_openai.embeddings import AzureOpenAIEmbeddings  # 导入嵌入模型
from langchain_community.document_loaders import PyPDFLoader
from tool import get_azure_endpoint, get_api_key, get_api_version
from langchain.memory import ConversationBufferMemory
if __name__ == '__main__':

    class CustomConversationBufferMemory(ConversationBufferMemory):
        """Buffer memory that records only the 'answer' key from chain outputs."""

        def save_context(self, inputs: dict, outputs: dict) -> None:
            # Keep only the 'answer' entry; any other output keys
            # (e.g. source_documents) never reach the history buffer.
            answer_only = {key: value for key, value in outputs.items() if key == 'answer'}
            if answer_only:
                super().save_context(inputs, answer_only)

    # Directory where the Chroma vector store is persisted on disk.
    persist_directory_chinese = './docs/chroma/matplotlib/'

    # Azure OpenAI embedding model used to vectorize the document chunks.
    embedding = AzureOpenAIEmbeddings(
        model="text-embedding-3-small",
        azure_endpoint=get_azure_endpoint().rstrip('/'),  # base URL without a trailing slash
        api_key=get_api_key(),
        api_version=get_api_version(),
    )

    # Load the Matplotlib tutorial PDFs. The first file appears twice on
    # purpose, seeding the corpus with duplicate documents.
    pdf_paths = [
        "./docs/matplotlib/第一回：Matplotlib初相识.pdf",
        "./docs/matplotlib/第一回：Matplotlib初相识.pdf",
        "./docs/matplotlib/第二回：艺术画笔见乾坤.pdf",
        "./docs/matplotlib/第三回：布局格式定方圆.pdf",
        "./docs/matplotlib/第四回：文字图例尽眉目.pdf",
        "./docs/matplotlib/第五回：样式色彩秀芳华.pdf",
    ]
    docs = []
    for pdf_path in pdf_paths:
        docs.extend(PyPDFLoader(pdf_path).load())
    print(len(docs))

    # Split the loaded pages into overlapping chunks sized for embedding.
    from langchain.text_splitter import RecursiveCharacterTextSplitter

    splitter = RecursiveCharacterTextSplitter(
        chunk_size=1500,   # target characters per chunk
        chunk_overlap=150  # characters shared between adjacent chunks
    )
    splits = splitter.split_documents(docs)

    # Build the vector store from the chunks and persist it to disk.
    # NOTE: persist() is deprecated — Chroma saves automatically when a
    # persist_directory is supplied — so the explicit call was removed.
    vectordb_chinese = Chroma.from_documents(
        documents=splits,
        embedding=embedding,
        persist_directory=persist_directory_chinese,
    )

    # Reload the store from disk (reusing the same directory constant
    # instead of a second hard-coded path) to verify the persisted data.
    vectordb_chinese = Chroma(
        persist_directory=persist_directory_chinese,
        embedding_function=embedding,
    )

    # Sanity check: number of embedded chunks in the collection.
    print(vectordb_chinese._collection.count())

    # Quick retrieval smoke test: fetch the 3 most similar chunks.
    question = "这节课的主要话题是什么"
    top_docs = vectordb_chinese.similarity_search(question, k=3)
    print(len(top_docs))

    from langchain_openai import AzureChatOpenAI  # chat model (not an embedding model)

    # Azure-hosted chat model used to answer questions.
    # (The duplicate `from tool import ...` was removed — these helpers are
    # already imported at the top of the file.)
    llm = AzureChatOpenAI(
        azure_endpoint=get_azure_endpoint().rstrip('/'),  # base URL without a trailing slash
        azure_deployment="gpt-4o-mini",
        openai_api_version=get_api_version(),
        openai_api_key=get_api_key(),
        openai_api_type="azure",
    )
    # llm.predict() is deprecated; invoke() returns an AIMessage whose
    # .content is the reply text, matching predict()'s string output.
    response = llm.invoke("你好").content
    print(response)

    # Build the prompt that stuffs the retrieved context into the question.
    from langchain.prompts import PromptTemplate

    template = """使用以下上下文来回答最后的问题。如果你不知道答案，就说你不知道，不要试图编造答 案。最多使用三句话。尽量使答案简明扼要。总是在回答的最后说“谢谢你的提问!”。
    {context}
    问题: {question}
    有用的回答:"""
    QA_CHAIN_PROMPT = PromptTemplate(input_variables=["context", "question"], template=template)

    # Retrieval-augmented QA chain over the vector store.
    from langchain.chains import RetrievalQA

    question = "这门课的主题是什么?"
    qa_chain = RetrievalQA.from_chain_type(
        llm,
        retriever=vectordb_chinese.as_retriever(),
        return_source_documents=True,
        chain_type_kwargs={"prompt": QA_CHAIN_PROMPT},
    )
    # Calling the chain directly (qa_chain({...})) is deprecated;
    # invoke() is the supported entry point and returns the same dict.
    result = qa_chain.invoke({"query": question})
    print(result["result"])

    # (Redundant re-import of ConversationBufferMemory removed — it is
    # already imported at the top of the file, and the chain uses the
    # custom subclass defined above anyway.)

    # Conversation memory that stores only the 'answer' part of each output,
    # so the chain's extra output keys don't pollute the history buffer.
    memory = CustomConversationBufferMemory(memory_key="chat_history", return_messages=True)

    qa_chain = RetrievalQA.from_chain_type(
        llm,
        retriever=vectordb_chinese.as_retriever(),
        chain_type="stuff",
        return_source_documents=True,
        chain_type_kwargs={"prompt": QA_CHAIN_PROMPT},
        memory=memory,
        output_key="answer",  # which output key the memory should record
    )

    # Two follow-up questions; invoke() replaces the deprecated __call__.
    question = "这门课需要学习 python 吗"
    result = qa_chain.invoke({"query": question})
    print(result["answer"])

    question1 = "为什么这门课需要这个前提?"
    result1 = qa_chain.invoke({"query": question1})
    print(result1['answer'])






