from flask import Blueprint, request, Response, current_app
from application.utils.general_tool import gcfnpdap, filter_docs, test_chat_history_length
from application.utils.vector import initialize_faiss_database
from application.utils.openai_api import gpt_three_stream

# Blueprint grouping the OpenAI chat endpoints; all routes are mounted under /openai.
openai_bp = Blueprint('openai', __name__, url_prefix='/openai')


@openai_bp.route("/chat", methods=['POST'])
def openai_chat():
    """
    Stream a chat completion, optionally grounded on a knowledge base.

    Expected JSON payload from the frontend:
    {
        "query": str,
        "history": List,
        "model_config": {
            "temperature": int,          # openai 0-2; other models unknown
            "max_token": int,            # max value 1024
            "system_prompt_code": int | None,
            "system_prompt_content": str | None,  # max length 1500
            "knowledge": str | None      # knowledge-base selector; None = plain chat
        }
    }
    :return: a text/event-stream Response on success, or a JSON 400 Response
             when the payload fails validation.
    """
    import json  # stdlib; only needed for the error payloads below

    def _bad_request(message):
        # Uniform 400 response for payload-validation failures.  The original
        # code silently fell through these branches (implicitly returning
        # None, which makes Flask raise a 500).
        return Response(
            json.dumps({"error": message}, ensure_ascii=False),
            status=400,
            mimetype="application/json",
        )

    data = request.json

    # --- payload validation --------------------------------------------------
    if not data or "query" not in data:
        return _bad_request("missing field: query")

    if "history" not in data:
        return _bad_request("missing field: history")

    if len(data["history"]) % 2 == 0:
        # Preserves the original parity check: even-length history is rejected.
        # NOTE(review): confirm the intended parity — a history of complete
        # user/assistant pairs would normally be even.
        return _bad_request("history must contain an odd number of entries")

    # Per-request state kept on the app object for compatibility with the
    # original implementation.
    # NOTE(review): mutating current_app per request is not safe under
    # concurrent requests — consider plain locals if nothing else reads these.
    current_app.vkb = None
    current_app.train_prompt = None
    current_app.no_train_prompt = None
    current_app.max_chat_length = 8000

    # Similarity threshold (cosine similarity: 1 = fully related, 0 = unrelated).
    current_app.similarity = 0.8

    # BUG FIX: the original did `new_question, chat_history = request.json`,
    # which unpacks the dict's *keys* ("query", "history"), not the values.
    new_question = data["query"]
    chat_history = data["history"]

    # Concatenated text of the relevant passages (None when no KB is used).
    related_docs_content = None

    # (doc, score) pairs returned by the vector store (None when no KB is used).
    related_docs = None

    # Use .get so a missing "model_config" degrades to the plain-chat path
    # instead of raising KeyError.
    if data.get("model_config", {}).get("knowledge"):
        vf_apath = gcfnpdap(__name__, 2) + "/static/store_vector_knowledge_dictory/" + "1716811846_2"
        current_app.vkb = initialize_faiss_database(vf_apath)

        # Always retrieve passages for the newest question...
        new_question_related_docs = current_app.vkb.similarity_search_with_relevance_scores(new_question, 2)
        related_docs = new_question_related_docs

        # ...and additionally for the previous user turn when enough history exists.
        if len(chat_history) > 2:
            history_related_docs = current_app.vkb.similarity_search_with_relevance_scores(chat_history[-3], 1)
            related_docs = new_question_related_docs + history_related_docs

        # Keep only documents whose score clears the similarity threshold.
        related_docs = filter_docs(related_docs, current_app.similarity)

        # BUG FIX: the original called max() over the scores unconditionally,
        # raising ValueError when filtering removed every document (the result
        # was also unused dead code, so it is simply dropped).
        if related_docs:
            # De-duplicate passages before joining them into one context blob.
            unique_contents = list(set(doc[0].page_content for doc in related_docs))
            related_docs_content = "\n\n".join(unique_contents)

    if related_docs:
        # Knowledge-grounded path: inject the retrieved passages through the
        # "trained" prompt template.
        # Database operation
        # ------
        # ------
        # If the payload carried a prompt code, that would be used via the DB.
        trained_prompt_parent_dir = gcfnpdap(__name__, 2) + "/static/trained/" + "file_name.txt"
        with open(trained_prompt_parent_dir, "r", encoding="UTF-8") as f:
            current_app.train_prompt = f.read()
        system_prompt = current_app.train_prompt.format(doc=related_docs_content)
    else:
        # Plain-chat path: use the generic (untrained) prompt.
        # Database operation
        # ------
        # ------
        no_trained_prompt_parent_dir = gcfnpdap(__name__, 2) + "/static/no_trained/" + "file_name.txt"
        with open(no_trained_prompt_parent_dir, "r", encoding="UTF-8") as f:
            current_app.no_train_prompt = f.read()
        system_prompt = current_app.no_train_prompt

    # Shared tail (previously duplicated in both branches): prepend the system
    # prompt, trim history to the length budget, and stream the completion.
    chat_history.insert(0, system_prompt)
    chat_history = test_chat_history_length(chat_history, current_app.max_chat_length)
    data["chat_history"] = chat_history
    return Response(gpt_three_stream(data), mimetype="text/event-stream")



