from langchain_core.prompts import ChatPromptTemplate

from config.template import judge_question_template
from utils.llm_generate import single_llm
from utils.log import logger
from utils.parser_json_output import parser_judge


def DesignJudge(item, complexity_int, max_retries=5):
    """Generate true/false (judge) questions for a text chunk via the LLM.

    :param item: the text chunk to generate questions from
    :param complexity_int: difficulty level (1=简单, 2=中等, 3=困难)
    :param max_retries: maximum attempts to obtain valid JSON before giving up
    :return: the generated questions as a JSON array (parsed by parser_judge)
    :raises ValueError: if complexity_int is not 1, 2 or 3
    :raises Exception: the last parsing/LLM error if all retries are exhausted
    """
    # Map the numeric difficulty to its label; fail fast on unknown values
    # instead of silently reusing a stale module-level variable (the old
    # `global complexity` behavior).
    difficulty_labels = {1: "简单", 2: "中等", 3: "困难"}
    try:
        complexity = difficulty_labels[complexity_int]
    except KeyError:
        raise ValueError(f"complexity_int must be 1, 2 or 3, got {complexity_int!r}")

    logger.info(f"进入了设计判断题方法，此次成功题目的难度为{complexity}")

    # Conversation history is threaded back into the prompt so the model can
    # see (and correct) its previous malformed outputs on retry.
    conversation = []
    chain_input = {
        "text": item,
        "complexity": complexity,
        "conversation": conversation
    }

    logger.info(f"这个片段的文本为：{item}")
    llm = single_llm

    last_error = None
    for _attempt in range(max_retries):
        try:
            template_prompt = ChatPromptTemplate.from_messages(
                [
                    ("system", judge_question_template),
                    ("placeholder", "{conversation}"),
                    ("system", "以下是用户输入的文本： "),
                    ("human", "<文本>{text}</文本>"),  # input
                    ("system", "以下是用户选择的难易程度： "),
                    # NOTE: fixed the doubled '<' in the opening tag so it
                    # balances the closing </难易程度> tag, matching <文本>.
                    ("human", "<难易程度>{complexity}</难易程度>")  # input
                ]
            )
            chain = template_prompt | llm | parser_judge
            data = chain.invoke(chain_input)
            logger.info(f"输出json数组为: {data}")
            logger.info("结束生成此处切分文档的判断题问题生成")
            return data
        except Exception as e:
            # Feed the error back into the conversation so the next attempt
            # can self-correct its output format.
            last_error = e
            errormsg = f'这是前几轮对话后产生的错误信息：{e},请检查上述错误信息并调整输出格式,确保输出为正确的json格式，'
            conversation.append(("human", errormsg))
            conversation.append(("ai", "收到命令，我会修改我的输出为正确的json格式"))
            chain_input["conversation"] = conversation

    # All retries exhausted: surface the last error instead of looping forever.
    logger.error(f"判断题生成失败，已重试{max_retries}次")
    raise last_error
