import os
import tempfile

from langchain.chains import create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain.prompts import ChatPromptTemplate
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.embeddings import DashScopeEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_openai import ChatOpenAI
from langchain_text_splitters import RecursiveCharacterTextSplitter


def get_response_from_pdf(key, file, question):
    """Answer *question* with a RAG pipeline over the uploaded PDF *file*.

    Builds an in-memory FAISS index from the PDF's chunks, then runs a
    retrieval chain against a DashScope-hosted Qwen model.

    Args:
        key: DashScope API key; exported to the environment for this process.
        file: File-like object (e.g. a web upload) whose ``read()`` returns
            the raw PDF bytes.
        question: The user's question, interpolated as the ``{input}`` of
            the retrieval chain.

    Returns:
        The model's answer string (the ``'answer'`` key of the chain output).
    """
    os.environ["DASHSCOPE_API_KEY"] = key

    llm = ChatOpenAI(
        api_key=os.getenv("DASHSCOPE_API_KEY"),
        # DashScope's OpenAI-compatible endpoint.
        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
        model="qwen-plus"
    )

    # Load: PyPDFLoader needs a path on disk, so persist the upload to a
    # temporary file. (A hard-coded shared path would race between
    # concurrent requests and require its directory to pre-exist.)
    tmp_path = None
    try:
        with tempfile.NamedTemporaryFile(suffix=".pdf", delete=False) as tmp:
            tmp.write(file.read())
            tmp_path = tmp.name
        loader = PyPDFLoader(tmp_path)
        document = loader.load()
    finally:
        # Always remove the temp file, even if loading fails.
        if tmp_path is not None:
            os.unlink(tmp_path)

    # Split: Chinese-aware separators so chunks break at paragraph,
    # line, sentence ("。") and clause ("，") boundaries first.
    text_split = RecursiveCharacterTextSplitter(
        chunk_size=600,
        chunk_overlap=100,
        separators=["\n\n", "\n", "。", "，", " ", ""]
    )
    documents = text_split.split_documents(document)

    # Embed each chunk and index it in an in-memory FAISS store.
    embedding_model = DashScopeEmbeddings(
        model="text-embedding-v1"
    )

    db = FAISS.from_documents(documents, embedding_model)

    retriever = db.as_retriever()

    # Retrieve + answer. The original literal accidentally embedded stray
    # quote characters (pasted adjacent-string style inside a triple-quoted
    # string); the cleaned text below is what should reach the model.
    prompt_content = (
        "You are an assistant for question-answering tasks. "
        "Use the following pieces of retrieved context to answer "
        "the question. If you don't know the answer, say that you "
        "don't know. Use three sentences maximum and keep the "
        "answer concise."
        "\n\n"
        "{context}"
    )

    prompt_template = ChatPromptTemplate.from_messages(
        [
            ("system", prompt_content),
            ("human", "{input}")
        ]
    )

    # Stuff all retrieved chunks into the prompt's {context} slot.
    stuff_chain = create_stuff_documents_chain(llm=llm, prompt=prompt_template)

    chain = create_retrieval_chain(retriever, stuff_chain)

    response = chain.invoke({"input": question})

    return response['answer']
