import streamlit as st
from PyPDF2 import PdfReader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_core.prompts import ChatPromptTemplate
from langchain_community.embeddings.spacy_embeddings import SpacyEmbeddings
from langchain_community.vectorstores import FAISS
from langchain.tools.retriever import create_retriever_tool
from langchain_openai import ChatOpenAI
from langchain.agents import AgentExecutor, create_tool_calling_agent

import os
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"  # work around duplicate OpenMP runtime crash (FAISS/torch both bundle libomp)
OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')  # read the OpenAI API key from the environment; None if unset

# Module-level embedding model shared by indexing and retrieval.
# Uses the small Chinese spaCy pipeline; must be installed separately
# (python -m spacy download zh_core_web_sm).
embeddings = SpacyEmbeddings(model_name="zh_core_web_sm")


def pdf_read(pdf_doc):
    """Extract and concatenate the text of every page in every uploaded PDF.

    Args:
        pdf_doc: iterable of file-like objects (e.g. Streamlit UploadedFile)
            readable by PyPDF2's PdfReader.

    Returns:
        str: all extracted page text joined in page order; "" for no input.
    """
    # extract_text() may return None on image-only or malformed pages
    # (older PyPDF2 versions); coalesce to "" so joining never fails.
    return "".join(
        page.extract_text() or ""
        for pdf in pdf_doc
        for page in PdfReader(pdf).pages
    )


def get_chunks(text):
    """Split raw document text into overlapping chunks for embedding.

    Args:
        text: the full extracted document text.

    Returns:
        list[str]: chunks of at most 1000 characters with a 200-character
        overlap between consecutive chunks, so context spans chunk borders.
    """
    splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
    return splitter.split_text(text)


def vector_store(text_chunks):
    """Embed the text chunks and persist a FAISS index to disk.

    Args:
        text_chunks: list[str] of document chunks to embed with the
            module-level spaCy embeddings.

    Side effects:
        Writes the index to the local "faiss_db" directory, overwriting
        any previous contents.
    """
    db = FAISS.from_texts(text_chunks, embedding=embeddings)
    db.save_local("faiss_db")


def get_conversational_chain(tools, ques):
    """Run a tool-calling agent over the retriever tool and render its answer.

    Args:
        tools: a single LangChain tool (the PDF retriever tool).
        ques: the user's question string.

    Side effects:
        Prints the raw agent response to stdout and writes the final
        answer into the Streamlit page.
    """
    # Deterministic (temperature=0) chat model backing the agent.
    llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0, api_key=OPENAI_API_KEY)

    # System prompt instructs the model to answer strictly from retrieved
    # context and to refuse rather than hallucinate.
    prompt = ChatPromptTemplate.from_messages(
        [
            (
                "system",
                "您是一个有用的助手。根据提供的上下文详细回答问题，确保提供所有细节。如果答案不在提供的上下文中，请回答：“上下文中没有答案”，不要提供错误答案。"
            ),
            ("placeholder", "{chat_history}"),
            ("human", "{input}"),
            ("placeholder", "{agent_scratchpad}"),
        ]
    )

    tool_list = [tools]  # the agent API expects a list of tools
    agent = create_tool_calling_agent(llm, tool_list, prompt)
    executor = AgentExecutor(agent=agent, tools=tool_list, verbose=True)

    response = executor.invoke({"input": ques})
    print(response)
    st.write("回复：", response['output'])


def user_input(user_question):
    """Answer a user question against the locally persisted FAISS index.

    Args:
        user_question: the question typed into the Streamlit text input.

    Side effects:
        Loads "faiss_db" from disk and delegates to
        get_conversational_chain, which renders the answer in the page.
    """
    # NOTE(review): allow_dangerous_deserialization unpickles local index
    # files — safe only because "faiss_db" is produced by this same app.
    db = FAISS.load_local("faiss_db", embeddings, allow_dangerous_deserialization=True)
    pdf_tool = create_retriever_tool(db.as_retriever(), "pdf_extractor", "用于回答PDF文件中的查询")
    get_conversational_chain(pdf_tool, user_question)


def main():
    """Streamlit entry point: chat UI plus a sidebar PDF-ingestion panel.

    Layout:
        - Main area: a text input; any submitted question is answered
          against the previously built FAISS index via user_input().
        - Sidebar: a multi-file PDF uploader and a button that extracts,
          chunks, embeds, and persists the uploaded documents.
    """
    st.set_page_config("Chat PDF")
    st.header("基于RAG的PDF聊天")

    user_question = st.text_input("与PDF文件对话")
    if user_question:
        user_input(user_question)

    with st.sidebar:
        st.title("目录：")
        pdf_doc = st.file_uploader("上传您的PDF文件并点击提交&处理按钮", accept_multiple_files=True)
        if st.button("提交&处理"):
            # Guard: with no upload, pdf_read yields "" and
            # FAISS.from_texts([]) raises — warn instead of crashing.
            if not pdf_doc:
                st.warning("请先上传PDF文件")
            else:
                with st.spinner("处理中..."):
                    raw_text = pdf_read(pdf_doc)
                    text_chunks = get_chunks(raw_text)
                    vector_store(text_chunks)
                    st.success("完成")


if __name__ == "__main__":
    main()
