import os
from dotenv import load_dotenv
from langchain_core.prompts import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate, \
    MessagesPlaceholder
from langchain.chat_models import init_chat_model
from langchain.agents import create_tool_calling_agent
from langchain.agents import AgentExecutor
from langchain.text_splitter import RecursiveCharacterTextSplitter
from PyPDF2 import PdfReader
from langchain_community.vectorstores import FAISS
from langchain_community.embeddings import DashScopeEmbeddings
from langchain_core.tools.retriever import create_retriever_tool

# 1. Read the text content of a PDF file.
def read_pdf(file_path):
    """Extract and concatenate the text of every page of the PDF at *file_path*.

    Each page's text is followed by a newline. Pages from which PyPDF2 cannot
    extract text (``extract_text()`` returns ``None``, e.g. scanned images)
    contribute only the newline instead of raising ``TypeError``.
    """
    pdf_reader = PdfReader(file_path)
    # `or ""` guards against extract_text() returning None on image-only pages;
    # "".join avoids the quadratic cost of repeated string concatenation.
    return "".join((page.extract_text() or "") + "\n" for page in pdf_reader.pages)

# 2. Split raw text into smaller chunks. chunk_size caps the number of
#    characters per chunk; overlap repeats characters between neighbouring
#    chunks, which helps preserve context across chunk boundaries.
def chunk_text(text, chunk_size=1000, overlap=200):
    """Split *text* into overlapping chunks with RecursiveCharacterTextSplitter."""
    splitter = RecursiveCharacterTextSplitter(
        chunk_size=chunk_size,
        chunk_overlap=overlap,
    )
    return splitter.split_text(text)

# 3. Store text chunks in a FAISS vector database. This needs a text embedding
#    model; here we use the hosted DashScope (Alibaba Bailian) embedding model.
#    Requires: pip3 install faiss-cpu
def vector_store(chunks, index_path="faiss_db", embedding_model_name="text-embedding-v1"):
    """Embed *chunks* with DashScope and persist a FAISS index to disk.

    Args:
        chunks: list of text chunks to embed and index.
        index_path: directory to save the FAISS index into (default "faiss_db",
            matching what langchain_agent_with_rag loads).
        embedding_model_name: DashScope embedding model id.

    Returns:
        The in-memory FAISS vector store (also saved under *index_path*).
    """
    embedding_model = DashScopeEmbeddings(
        model=embedding_model_name, dashscope_api_key=os.getenv("DASHSCOPE_API_KEY")
    )
    faiss_vector_store = FAISS.from_texts(chunks, embedding=embedding_model)
    # Persist locally so the index can be reloaded later without re-embedding.
    faiss_vector_store.save_local(index_path)
    return faiss_vector_store


# 4. Build a LangChain agent that uses the FAISS vector database as its
#    knowledge base, implementing RAG.
def langchain_agent_with_rag(query="请总结下KnowledgeFind有哪些功能？"):
    """Run a tool-calling agent whose retriever tool searches the FAISS index.

    Args:
        query: user question to answer (defaults to the original demo query).

    Returns:
        The agent executor's response dict (contains the "output" key).
    """
    # Load the vector DB with the same embedding model used when it was saved.
    embedding_model = DashScopeEmbeddings(
        model="text-embedding-v1", dashscope_api_key=os.getenv("DASHSCOPE_API_KEY")
    )
    # allow_dangerous_deserialization is required because FAISS.load_local
    # unpickles data; only safe here since we wrote faiss_db ourselves.
    faiss_db = FAISS.load_local(
        "faiss_db", embedding_model, allow_dangerous_deserialization=True
    )
    retriever = faiss_db.as_retriever()
    # Retrieval tool the agent can invoke to fetch relevant document chunks.
    retriever_tool = create_retriever_tool(
        retriever,
        "pdf_extractor",
        "This tool is to give answer to queries from the pdf document.",
    )
    # Chat model used for reasoning / tool calling.
    model = init_chat_model(model="deepseek-chat", model_provider="deepseek")
    prompt = ChatPromptTemplate.from_messages([
        SystemMessagePromptTemplate.from_template("你是一个问答助手，可以根据用户的问题，从PDF文档中检索相关信息，给出准确的回答。如果无法从文档中找到答案，请说“抱歉，我无法从文档中找到相关信息。”"),
        HumanMessagePromptTemplate.from_template("{input}"),
        # agent_scratchpad holds intermediate tool-call messages.
        MessagesPlaceholder(variable_name="agent_scratchpad"),
    ])
    tools = [retriever_tool]
    # NOTE: the original called `model.bind_tools(tools)` and discarded the
    # result — bind_tools returns a new runnable, so that call was a no-op.
    # create_tool_calling_agent binds the tools itself; the call is removed.
    agent = create_tool_calling_agent(model, tools, prompt)
    agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
    response = agent_executor.invoke({"input": query})
    print(f"!!! Langchain agent with RAG, response:{response}, output:{response['output']}")
    return response


if __name__ == '__main__':
    load_dotenv(override=True)
    DEEPSEEK_API_KEY = os.getenv("DEEPSEEK_API_KEY")
    DASHSCOPE_API_KEY = os.getenv("DASHSCOPE_API_KEY")
    print(f"DEEPSEEK_API_KEY: {DEEPSEEK_API_KEY}, DASHSCOPE_API_KEY:{DASHSCOPE_API_KEY}")

    # 准备工作
    pdf_content = read_pdf("KnowledgeFind Feature Walkthrough.pdf")
    chunks = chunk_text(pdf_content)
    vector_store(chunks)
    # 运行LangChain Agent with RAG
    langchain_agent_with_rag()










