import os

import logging
import chardet
from langchain_community.embeddings import DashScopeEmbeddings
from langchain.document_loaders import PyPDFLoader
from langchain.document_loaders import Docx2txtLoader
from langchain.document_loaders import TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Qdrant
from langchain_community.chat_models.tongyi import ChatTongyi
from langchain.retrievers.multi_query import MultiQueryRetriever
from langchain.chains import RetrievalQA
from flask import Flask, request, render_template

# Configure logging so MultiQueryRetriever logs the queries it generates.
logging.basicConfig()
logging.getLogger('langchain.retrievers.multi_query').setLevel(logging.INFO)

# Current working directory — used to locate the "documents" folder.
CURRENT_DIRECTORY_PATH = os.getcwd()

# Alibaba Cloud Bailian (DashScope) API key, read from the environment.
# NOTE(review): this constant is not referenced in the visible code — the
# DashScope clients presumably read the env var themselves; confirm.
DASHSCOPE_API_KEY = os.getenv("DASHSCOPE_API_KEY")
# DashScope OpenAI-compatible endpoint base URL.
# NOTE(review): also appears unused in the visible code.
DASHSCOPE_API_BASE_URL = "https://dashscope.aliyuncs.com/compatible-mode/v1"

app = Flask(__name__)


def _load_documents(base_dir: str) -> list:
    """
    Load documents from the top level of *base_dir* (supports pdf, docx, txt).

    :param base_dir: directory whose files are loaded
    :return: list of loaded langchain ``Document`` objects
    """
    document_list = []
    for file in os.listdir(base_dir):
        # Build the full path and skip anything that is not a regular file
        # (sub-directories would previously crash the loaders).
        file_path = os.path.join(base_dir, file)
        if not os.path.isfile(file_path):
            continue

        # Match the extension case-insensitively so "NOTES.TXT" etc. load too.
        suffix = os.path.splitext(file)[1].lower()
        if suffix == '.pdf':
            loader = PyPDFLoader(file_path)
        elif suffix == '.docx':
            loader = Docx2txtLoader(file_path)
        elif suffix == '.txt':
            # Detect the file's text encoding. chardet returns encoding=None
            # for empty/undecidable files, so fall back to UTF-8.
            with open(file_path, "rb") as f:
                detected = chardet.detect(f.read())
            encoding = detected["encoding"] or "utf-8"
            loader = TextLoader(file_path, encoding=encoding)
        else:
            # Unsupported file type — ignore it.
            continue
        document_list.extend(loader.load())

    return document_list


def _build_qa_chain():
    """Assemble the RetrievalQA chain: load, split, embed, then wire retrieval."""
    # 1. Load the source documents from ./documents.
    docs = _load_documents(os.path.join(CURRENT_DIRECTORY_PATH, "documents"))

    # 2. Split the documents into small overlapping chunks for embedding.
    splitter = RecursiveCharacterTextSplitter(chunk_size=50, chunk_overlap=10)
    chunks = splitter.split_documents(docs)

    # 3. Embed every chunk and store it in an in-memory Qdrant collection.
    vector_store = Qdrant.from_documents(
        documents=chunks,
        embedding=DashScopeEmbeddings(model="text-embedding-v3"),
        location=":memory:",
        collection_name="life_guru_documents")

    # 4. Build the multi-query retriever and the final QA chain.
    llm = ChatTongyi(model="qwen-max")
    multi_query_retriever = MultiQueryRetriever.from_llm(
        retriever=vector_store.as_retriever(),
        llm=llm,
    )
    return RetrievalQA.from_chain_type(
        llm=llm,
        retriever=multi_query_retriever,
        chain_type="stuff",
        return_source_documents=True,
    )


# Build the QA chain once at import time — this loads and embeds every
# document before the first request is served.
qa_chain = _build_qa_chain()


def ask_life_guru(question: str) -> str:
    """
    Ask the "life guru" QA chain a question.

    :param question: the user's question
    :return: the chain's generated answer text
    """
    # 5. Generation: retrieve relevant chunks and generate the answer.
    # Use ``invoke`` rather than calling the chain directly —
    # ``Chain.__call__`` is deprecated in modern LangChain releases.
    response = qa_chain.invoke({"query": question})
    return response["result"]


@app.route('/', methods=['GET', 'POST'])
def index():
    """Render the question form; on POST, answer the submitted question."""
    if request.method == 'POST':
        question = request.form.get('question')
        # Guard against a missing or blank form field so the chain is never
        # queried with None or an empty string.
        if question and question.strip():
            result = ask_life_guru(question)
            return render_template('index.html', result=result)

    return render_template('index.html')


if __name__ == "__main__":
    # NOTE(review): debug=True combined with host='0.0.0.0' exposes the
    # Werkzeug interactive debugger to the whole network (remote code
    # execution risk) — disable debug before deploying anywhere public.
    app.run(host='0.0.0.0', debug=True, port=8090)
