from langchain.chains.conversational_retrieval.base import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
from langchain_community.document_loaders import PyPDFLoader, CSVLoader, TextLoader
from langchain_openai import ChatOpenAI
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_community.embeddings import DashScopeEmbeddings
from langchain_community.vectorstores import FAISS
from langchain.prompts import ChatPromptTemplate
from langchain.chains import create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain

import os


def _load_document(file):
    """Persist an uploaded file to disk and load it as LangChain documents.

    The uploaded object is expected to expose ``.read()`` and ``.name``
    (e.g. a Streamlit ``UploadedFile`` — TODO confirm against caller).

    Returns:
        The list of loaded documents, or ``None`` when the file extension
        is not one of the supported types (.pdf, .csv, .txt).
    """
    file_content = file.read()
    _, file_extension = os.path.splitext(file.name)

    # BUG FIX: splitext() keeps the leading dot in the extension, so the
    # original f"other/test.{file_extension}" produced "other/test..pdf".
    file_url = f"other/test{file_extension}"

    # Stage the upload on disk because the loaders below read from a path.
    with open(file_url, "wb") as f:
        f.write(file_content)

    if file_url.endswith('.pdf'):
        loader = PyPDFLoader(file_url)
    elif file_url.endswith('.csv'):
        loader = CSVLoader(file_url)
    elif file_url.endswith('.txt'):
        loader = TextLoader(file_url, encoding='utf-8')
    else:
        # Unsupported extension: signal to the caller.
        return None

    # Load exactly once (the original called loader.load() twice per file).
    return loader.load()


def get_response_from_file(key, files, question, re_chain, strategy):
    """Answer *question* against uploaded files with a RAG pipeline.

    Each file is loaded, chunked, embedded into a FAISS index, and queried
    through either the legacy ``ConversationalRetrievalChain`` or the
    modern ``create_retrieval_chain`` pipeline. The per-file answers are
    then summarized by one final LLM call.

    Args:
        key: DashScope API key; exported to the environment and used by
            both the chat model and the embedding model.
        files: iterable of uploaded file objects (``.read()``/``.name``).
        question: the user's question, asked against every file.
        re_chain: chain selector; the literal '使用老版ConversationalRetrievalChain'
            picks the legacy chain, anything else uses the new pipeline.
        strategy: ``chain_type`` forwarded to the legacy chain (e.g. "stuff").

    Returns:
        A tuple ``(final_answer_text, relevant_list)`` where
        ``relevant_list`` holds the concatenated retrieved chunks per file,
        or ``None`` if any file has an unsupported extension.
    """
    os.environ["DASHSCOPE_API_KEY"] = key

    response_list = []
    relevant_list = []

    llm = ChatOpenAI(
        api_key=os.getenv("DASHSCOPE_API_KEY"),  # falls back to the key exported above
        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",  # DashScope OpenAI-compatible endpoint
        model="qwen-plus"
    )

    # Loop-invariant components, hoisted out of the per-file loop:
    # splitter tuned for Chinese text (。 and ， separators).
    text_split = RecursiveCharacterTextSplitter(
        chunk_size=600,
        chunk_overlap=100,
        separators=["\n\n", "\n", "。", "，", " ", ""]
    )

    embedding_model = DashScopeEmbeddings(
        model="text-embedding-v1"
    )

    prompt_content = """
                "You are an assistant for question-answering tasks. "
                "Use the following pieces of retrieved context to answer "
                "the question. If you don't know the answer,say that you "
                "don't know. Use three sentences maximum and keep the "
                "answer concise."
                "\n\n"
                "{context}"
        """

    prompt_template = ChatPromptTemplate.from_messages(
        [
            ("system", prompt_content),
            ("human", "{input}")
        ]
    )

    for file in files:
        document = _load_document(file)
        if document is None:
            # Preserve the original contract: abort on unsupported types.
            return None

        # Split, embed, and index this file's content.
        documents = text_split.split_documents(document)
        db = FAISS.from_documents(documents, embedding_model)
        retriever = db.as_retriever()

        if re_chain == '使用老版ConversationalRetrievalChain':
            # Legacy path: conversational chain with its own buffer memory.
            memory = ConversationBufferMemory(
                return_messages=True,
                memory_key="chat_history",
                output_key="answer"
            )
            chain = ConversationalRetrievalChain.from_llm(
                llm=llm,
                memory=memory,
                retriever=retriever,
                chain_type=strategy
            )
            response = chain.invoke({"question": question})
        else:
            # Default path: modern create_retrieval_chain pipeline.
            stuff_chain = create_stuff_documents_chain(llm=llm, prompt=prompt_template)
            chain = create_retrieval_chain(retriever, stuff_chain)
            response = chain.invoke({"input": question})

        # Collect the raw retrieved chunks for display alongside the answer.
        # retriever.invoke() replaces the deprecated get_relevant_documents().
        docs = retriever.invoke(question)
        relevant_content = "\n\n".join(doc.page_content for doc in docs)

        # Both chain variants expose the model output under the "answer" key.
        response_list.append(response["answer"])
        relevant_list.append(relevant_content)

    # Final pass: summarize the per-file answers into one response.
    body_prompt_content = """
                Please refer to the following known file 						information:

                {message}

                question answering:

                {question}

            Please provide a clear and concise statement, 	and do not fabricate it. If there is no problem 		related content in the known file information, 	please let me know that the problem is not 			included in the file content. 
    """

    body_prompt_template = ChatPromptTemplate.from_messages(
        [
            ("human", body_prompt_content)
        ]
    )

    body_prompt = body_prompt_template.invoke({"question": question, "message": str(response_list)})

    body_response = llm.invoke(body_prompt)

    return body_response.content, relevant_list
