import json
import random
from contextlib import asynccontextmanager
from itertools import chain
from typing import Any, Dict, List, Optional, Tuple

import fastapi

import tree_rag.agents as agents
from tree_rag import adapters
from tree_rag.adapters import embedding
from tree_rag.agents import Prompt
from tree_rag.agents.tone_converter import apply_tone_to_prompt
from tree_rag.dataclasses.api import (
    DirectSimplifyAnswerRequest,
    QA,
    AssistantInfo,
    BaseResponse,
    Clarify,
    DeleteRequest,
    Explanation,
    ExtractionConfig,
    Knowledge,
    RefuseWords,
    RefuseWordsExclusion,
    RerankSimilarityRequest,
    RerankSimilarityResponse,
    SimpleAnswerPrompt,
    SimplifyAnswerRequest,
    SimplifyAnswerResponse,
    WorkflowByAgentRequest,
    WorkflowRequest,
    WorkflowResponse,
)
from tree_rag.dataclasses.dify import DifyKnowledge
from tree_rag.dataclasses.knowledge import ALL_NODES_TYPE, Graph, Scene, refine_graph
from tree_rag.es import (
    clear_cache,
    delete,
    get_all_nodes,
    get_assistant_by_channel_id,
    get_assistant_by_name,
    get_refuse_config,
    get_simple_answer_prompt_by_id,
    get_simple_answer_prompt_by_tenant_channel,
    index_assistant,
    index_clarify,
    index_document,
    index_explanation,
    index_refuse_words,
    index_refuse_words_exclusion,
    index_simple_answer_prompt,
    query_clarify,
    search,
    search_information,
)
from tree_rag.session import Role, Session, Status, session_manager


@asynccontextmanager
async def lifespan(app: fastapi.FastAPI):
    """FastAPI lifespan hook: clear the ES-backed cache on startup and close
    the session manager on shutdown."""
    await clear_cache()
    yield
    await session_manager.close()


# Application instance; startup/shutdown work is handled by the lifespan context manager.
app = fastapi.FastAPI(lifespan=lifespan)


@app.post("/graph")
async def graph(request: Knowledge = fastapi.Body(...)) -> BaseResponse:
    question_embedding: List[float] = await embedding(request.question)
    answer_embedding: List[float] = await embedding(request.answer)
    await index_document(request, question_embedding, answer_embedding)
    return BaseResponse()


@app.post("/explanation")
async def explanation(request: Explanation = fastapi.Body(...)) -> BaseResponse:
    await index_explanation(request)
    return BaseResponse()


@app.post("/clarify")
async def clarify(request: Clarify = fastapi.Body(...)) -> BaseResponse:
    await index_clarify(request)
    return BaseResponse()


@app.post("/refuse_words")
async def refuse_words(request: RefuseWords = fastapi.Body(...)) -> BaseResponse:
    await index_refuse_words(request)
    return BaseResponse()


@app.post("/refuse_words_exclusion")
async def refuse_words_exclusion(
    request: RefuseWordsExclusion = fastapi.Body(...),
) -> BaseResponse:
    await index_refuse_words_exclusion(request)
    return BaseResponse()


@app.post("/assistant_info")
async def assistant_info(request: AssistantInfo = fastapi.Body(...)) -> BaseResponse:
    await index_assistant(request)
    return BaseResponse()


@app.post("/simple_answer_prompt")
async def simple_answer_prompt(request: SimpleAnswerPrompt = fastapi.Body(...)) -> BaseResponse:
    await index_simple_answer_prompt(request)
    return BaseResponse()


@app.post("/direct_simplify_answer")
async def direct_simplify_answer(request: DirectSimplifyAnswerRequest = fastapi.Body(...)) -> SimplifyAnswerResponse:
    """直接通过提示词和详细答案获取极简答案的调试接口

    这个接口允许客户直接传入提示词和详细答案进行实时调试，
    无需先保存并训练提示词，确认效果满意后再保存。
    """
    # Process the answer simplification directly using the provided prompt
    if len(request.detailed_answer) <= 100:
        # If answer is already short, return as-is
        simplified_answer = request.detailed_answer
    else:
        # If answer is long, use LLM to simplify it to 100 characters or less
        simplified_answer = await agents.simplify_answer.run_agent(
            request.prompt, request.detailed_answer
        )

    return SimplifyAnswerResponse(
        code=200,
        msg="success",
        data={"simplified_answer": simplified_answer}
    )


@app.post("/simplify_answer")
async def simplify_answer(request: SimplifyAnswerRequest = fastapi.Body(...)) -> SimplifyAnswerResponse:
    # Get the prompt based on the request parameters
    prompt_obj = None

    if request.prompt_id:
        prompt_obj = await get_simple_answer_prompt_by_id(request.prompt_id)
        if not prompt_obj:
            return SimplifyAnswerResponse(
                code=404,
                msg="未找到对应的极简答案整理提示词"
            )
    elif request.tenant_id and request.channel_id:
        prompt_obj = await get_simple_answer_prompt_by_tenant_channel(
            request.tenant_id, request.channel_id
        )
        if not prompt_obj:
            return SimplifyAnswerResponse(
                code=404,
                msg="未找到对应的极简答案整理提示词"
            )
    else:
        return SimplifyAnswerResponse(
            code=400,
            msg="参数错误：请提供tenant_id+channel_id或prompt_id其中一种方式"
        )

    # Process the answer simplification
    if len(request.detailed_answer) <= 100:
        # If answer is already short, return as-is
        simplified_answer = request.detailed_answer
    else:
        # If answer is long, use LLM to simplify it to 100 characters or less
        simplified_answer = await agents.simplify_answer.run_agent(
            prompt_obj.prompt, request.detailed_answer
        )

    return SimplifyAnswerResponse(
        code=200,
        msg="success",
        data={"simplified_answer": simplified_answer}
    )


@app.post("/delete")
async def delete_(request: DeleteRequest = fastapi.Body(...)) -> BaseResponse:
    await delete(request.id_type, request.id)
    return BaseResponse()


async def _extraction(
    session: Session, extraction_config: ExtractionConfig
) -> Tuple[Dict[str, Any], str, List[str]]:
    """Extract parameters from the chat history and decide whether to re-ask.

    Returns a tuple of (params, reask_words, missing_keys): the merged parameter
    dict, the follow-up question to ask when a required value is still absent or
    empty ("" when nothing needs re-asking), and the reask keys absent from params.
    """
    previously_cached = session.extra.get("cached_params", {})

    params = await agents.common_extraction.run_agent(
        context=session.chat_history(),
        prompt_template=extraction_config.prompt,
    )

    # Backfill empty extractions from values cached in earlier turns.
    for name in params:
        if not params[name] and name in previously_cached:
            params[name] = previously_cached[name]

    # Fall back to configured defaults for keys the agent did not produce at all.
    for name, fallback in extraction_config.failure_default.items():
        params.setdefault(name, fallback)

    missing = set(extraction_config.reask_words) - set(params)

    # The first reask key that is absent or empty determines the follow-up question.
    followup = ""
    for name, words in extraction_config.reask_words.items():
        if not params.get(name):
            followup = words
            break

    return params, followup, list(missing)


async def _direct_answer(
    session: Session, request: WorkflowRequest, role_prompt: str, summary: Optional[str] = None
) -> WorkflowResponse:
    """Answer without graph guidance.

    Tries question-side RAG first, then answer-side RAG; when neither yields a
    strong (>= 0.8) match, falls back to a bare-LLM answer when configured,
    otherwise builds a refusal. The session status is updated and the session
    persisted before every return.

    Args:
        session: Current conversation session.
        request: Incoming workflow request (tenant/channel ids, tone, flags).
        role_prompt: Identity prompt injected into the generated prompts.
        summary: Pre-computed summary of the user's turns; recomputed from the
            session history when None.
    """
    if summary is None:
        user_history = "\n".join(session.user_history())
        summary = await agents.summary.run_agent(user_history)

    # Question-side search: match the summary against stored questions.
    qs_knowledges: List[Knowledge] = await search(
        request.tenant_id,
        request.channel_id,
        query=summary,
        mode="q",
        rerank_threshold=0.0,
        size=5,
        search_other=True,
        search_self_generated=False,
    )
    refuse_config = await get_refuse_config(request.tenant_id, request.channel_id)
    if len([item for item in qs_knowledges if item.matched_score >= 0.8]) > 0:
        # Keep only strong matches and answer via RAG.
        qs_knowledges = [item for item in qs_knowledges if item.matched_score >= 0.8]
        prompt: Prompt = agents.rag.build_prompt(
            qs_knowledges,
            session.history,
            role_prompt=role_prompt,
            use_llm_for_unknown=request.use_llm_for_unknown,
            exclusion=refuse_config.get("exclusion", []),
        )
        # Apply the configured tone to the prompts.
        system_prompt, user_prompt = apply_tone_to_prompt(
            prompt.system_prompt, prompt.user_prompt, request.tone
        )

        response = WorkflowResponse(
            system_prompt=system_prompt,
            user_prompt=user_prompt,
            knowledge=[DifyKnowledge.from_knowledge(k) for k in qs_knowledges],
            graph=session.graph,
            status_code=Status.RAG_ANSWER,
        )
        session.extra["status"] = Status.RAG_ANSWER
        await session_manager.put(session)
        return response

    # Answer-side search: see whether any stored answer can answer directly.
    as_knowledges = await search(
        request.tenant_id,
        request.channel_id,
        query=summary,
        mode="a",
        rerank_threshold=0.8,
        search_other=True,
        search_self_generated=False,
        size=5,
    )
    if len([item for item in as_knowledges if item.matched_score >= 0.8]) > 0:
        as_knowledges = [item for item in as_knowledges if item.matched_score >= 0.8]
        prompt: Prompt = agents.rag.build_prompt(
            as_knowledges,
            session.history,
            role_prompt=role_prompt,
            use_llm_for_unknown=request.use_llm_for_unknown,
            exclusion=refuse_config.get("exclusion", []),
        )
        # Apply the configured tone to the prompts.
        system_prompt, user_prompt = apply_tone_to_prompt(
            prompt.system_prompt, prompt.user_prompt, request.tone
        )

        response = WorkflowResponse(
            system_prompt=system_prompt,
            user_prompt=user_prompt,
            knowledge=[DifyKnowledge.from_knowledge(k) for k in as_knowledges],
            graph=session.graph,
            status_code=Status.RAG_ANSWER,
        )
        session.extra["status"] = Status.RAG_ANSWER
        await session_manager.put(session)
        return response

    # No strong match on either side: surface the best candidates as context.
    similar_knowledges = sorted(
        qs_knowledges + as_knowledges, key=lambda k: k.matched_score, reverse=True
    )

    # If configured, let the LLM answer the unknown question itself.
    if request.use_llm_for_unknown:
        llm_prompt: Prompt = agents.llm_answer.build_prompt(
            request.user_input, session.history, role_prompt=role_prompt
        )
        # Apply the configured tone to the prompts.
        system_prompt, user_prompt = apply_tone_to_prompt(
            llm_prompt.system_prompt, llm_prompt.user_prompt, request.tone
        )

        response = WorkflowResponse(
            system_prompt=system_prompt,
            user_prompt=user_prompt,
            knowledge=[DifyKnowledge.from_knowledge(k) for k in similar_knowledges],
            graph=session.graph,
            # NOTE(review): this LLM-answer branch also reports Status.REFUSE — confirm intentional.
            status_code=Status.REFUSE,
        )
        session.extra["status"] = Status.REFUSE
    else:
        # Refuse to answer.
        refuse_word = (
            random.choice(refuse_config["words"])
            if refuse_config.get("words")
            else None
        )

        prompt: Prompt = agents.decline.build_prompt(
            request.user_input,
            session.history,
            role_prompt=role_prompt,
            refuse_word=refuse_word,
            exclusion=refuse_config.get("exclusion", []),
        )
        # Apply the configured tone to the prompts.
        system_prompt, user_prompt = apply_tone_to_prompt(
            prompt.system_prompt, prompt.user_prompt, request.tone
        )

        response = WorkflowResponse(
            system_prompt=system_prompt,
            user_prompt=user_prompt,
            knowledge=[DifyKnowledge.from_knowledge(k) for k in similar_knowledges],
            graph=session.graph,
            status_code=Status.REFUSE,
        )
        session.extra["status"] = Status.REFUSE

    await session_manager.put(session)
    return response


async def _handle_reask(
    request: WorkflowRequest, session: Session
) -> Optional[WorkflowResponse]:
    """Continue a pending re-ask round, returning a follow-up response if still needed.

    Returns None when no re-ask is in flight, when the newly missing keys are a
    subset of the previously missing ones (i.e. re-asking cannot make progress),
    or when nothing remains to ask.
    """
    if not session.extra.get("reask_flag", False):
        return None

    previously_missing = session.extra.get("missing_keys", [])
    extraction_config: ExtractionConfig = ExtractionConfig(**session.extra["extraction"])

    _params, followup, still_missing = await _extraction(session, extraction_config)

    # Nothing newly missing beyond what was already missing -> stop re-asking.
    if set(still_missing).issubset(set(previously_missing)):
        return None

    if followup:
        repeat_prompt: Prompt = agents.repeat.build_prompt(followup)
        # Apply the configured tone to the prompts.
        system_prompt, user_prompt = apply_tone_to_prompt(
            repeat_prompt.system_prompt, repeat_prompt.user_prompt, request.tone
        )

        reask_response = WorkflowResponse(
            system_prompt=system_prompt,
            user_prompt=user_prompt,
            knowledge=[],
            graph=session.graph,
            status_code=Status.REASK_INFO,
        )
        await session_manager.put(session)
        return reask_response

    return None


def _need_clarify(knowledges: List[Knowledge]) -> bool:
    """Whether scene clarification is warranted: there is at least one hit and
    the top hit's graph carries scene nodes."""
    return bool(knowledges) and len(knowledges[0].graph.scene) > 0


async def _workflow(request: WorkflowRequest) -> WorkflowResponse:
    """Core dialogue workflow.

    Extracts a knowledge-graph query from the user's turn, clarifies missing
    level-1 / level-2 / scene nodes when needed, and builds the final RAG,
    information-lookup, re-ask, or refusal prompts. The updated session is
    persisted before every return.
    """
    if request.reset:
        await session_manager.delete(request.conversation_id)

    session = await session_manager.pop(request.conversation_id)
    session.fix()

    # Get refuse config for exclusion handling
    refuse_config = await get_refuse_config(request.tenant_id, request.channel_id)

    session.add_history(Role.USER, request.user_input)

    # A pending re-ask round short-circuits the rest of the pipeline.
    if response := await _handle_reask(request, session):
        return response

    q_knowledges: List[Knowledge] = await search(
        request.tenant_id,
        request.channel_id,
        query=request.user_input,
        mode="q",
        rerank_threshold=0.5,
        size=3,
    )

    # Extract knowledge-graph nodes.
    # NOTE(review): the None assignment below is dead — it is overwritten immediately.
    all_nodes: ALL_NODES_TYPE = None
    all_nodes, kg_tree = await get_all_nodes(request.tenant_id, request.channel_id)

    skills = all_nodes.all_level_1.copy()
    # TODO hard coding here: temporarily drop "社保" from candidate level-1 items
    # when it is not the only one.
    if "社保" in skills and len(skills) > 1:
        skills.remove("社保")

    # Build the identity (role) prompt.
    if hasattr(request, "service") and request.service:
        role_prompt = (
            "如果用户问你身份活能力范围相关的问题，你可以这样回答："
            + request.service
            + "\n以上也是你的身份设定，如果用户问到，尽量回答完整。"
        )
    else:
        role_prompt = f"你是政务智能助手，隶属于{request.position}， 你的名字叫{request.agent_name}。可以回答{'、'.join(skills[:10])}的相关问题。"

    # In-context-learning examples for the extraction agent.
    icl_knowledges: List[Knowledge] = await search(
        request.tenant_id,
        request.channel_id,
        query=request.user_input,
        mode="gc",
        rerank_threshold=0.8,
        size=3,
    )
    if request.use_qa_as_example and not icl_knowledges:
        icl_knowledges = q_knowledges.copy()

    # Extraction.
    graph: Graph = await agents.extraction.run_agent(
        request.user_input, all_nodes, icl_knowledges, role_prompt
    )
    level_2_guard = False  # hard-coded escape hatch that forces a level-2 clarification
    # TODO hard coding here: "社保" is easily mis-extracted.
    if "社保" in graph.level_1:
        graph.level_1.remove("社保")
        if "养老保险" not in graph.level_1 and "养老保险" in all_nodes.all_level_1:
            graph.level_1.append("养老保险")
            level_2_guard = True
    # TODO hard coding here: insured-identity scenes are easily mis-extracted.
    scene_to_keep = []

    # Strict insured-identity validation only when there are no in-context learning examples.
    if not icl_knowledges:
        for scene in graph.scene:
            if scene.category == "参保身份":
                triage_label = await agents.triage.run_agent(
                    request.user_input, [scene.label]
                )
                if triage_label != scene.label:
                    continue
            scene_to_keep.append(scene)
        graph.scene = scene_to_keep

    # TODO hard coding here: avoid asking the level-2 clarification twice in a row.
    if len(graph.level_2) == 0 and (
        session.graph is not None
        and len(session.graph.level_2) == 0
        and session.graph.level_1 == graph.level_1
        and session.graph.level_1  # a fully empty graph is not intercepted either — it goes through direct answer
    ):
        # NOTE(review): refuse_config was already fetched above — this re-fetch looks redundant.
        refuse_config = await get_refuse_config(request.tenant_id, request.channel_id)
        refuse_word = (
            random.choice(refuse_config["words"])
            if refuse_config.get("words")
            else None
        )

        session.update_graph(graph, kg_tree=kg_tree)
        prompt = agents.decline.build_prompt(
            request.user_input,
            session.history,
            role_prompt=role_prompt,
            refuse_word=refuse_word,
            exclusion=refuse_config.get("exclusion", []),
        )
        # Apply the configured tone to the prompts.
        system_prompt, user_prompt = apply_tone_to_prompt(
            prompt.system_prompt, prompt.user_prompt, request.tone
        )

        response = WorkflowResponse(
            system_prompt=system_prompt,
            user_prompt=user_prompt,
            knowledge=[],
            graph=session.graph,
        )
        await session_manager.put(session)
        return response

    session.update_graph(graph, kg_tree=kg_tree)

    # The user replied "all of them" to a pending clarification: adopt every candidate.
    if (
        graph.is_empty()
        and session.extra
        and "clarify_candidates" in session.extra
        and await agents.triage.run_agent(
            request.user_input,
            all_nodes.all_level_1 + ["都要办理", "都可以", "都想了解一下", "都要参加"],
            use_other=True,
            use_empty=False,
            use_examples=False,
        )
        in ["都要办理", "都可以", "都想了解一下", "都要参加"]
    ):
        status = session.extra["status"]
        if status == Status.REASK_LEVEL_1:
            session.graph.level_1 = session.extra["clarify_candidates"]
        elif status == Status.REASK_LEVEL_2:
            session.graph.level_2 = session.extra["clarify_candidates"]
        elif status == Status.REASK_SCENE:
            category = session.extra["clarify_category"]
            # Remove existing scenes of the same category
            session.graph.scene = [
                s for s in session.graph.scene if s.category != category
            ]
            # Append all candidate scenes for this category
            for label in session.extra["clarify_candidates"]:
                session.graph.scene.append(Scene(category=category, label=label))

    elif graph.is_empty() and (
        not session.graph.level_1
        or not session.graph.level_2
        or (session.graph.level_1 and session.graph.level_2)
    ):
        # NOTE(review): the parenthesized condition above is a tautology
        # ((not A) or (not B) or (A and B) is always true) — confirm intent.
        return await _direct_answer(session, request, role_prompt=role_prompt)

    session.graph = refine_graph(kg_tree, session.graph, all_nodes)

    # Level-1 item missing: ask a clarifying question.
    if len(session.graph.level_1) == 0:
        # If level-2 items exist, only clarify the level-1 items they belong to.
        clarify_level_1 = set()
        if not graph.level_2:
            # NOTE(review): this binds the shared all_level_1 object rather than a
            # copy; the remove() below would then mutate all_nodes.all_level_1 —
            # confirm all_level_1 is a fresh collection or copy it here.
            clarify_level_1 = all_nodes.all_level_1
        else:
            for level_1 in all_nodes.all_level_1:
                for level_2 in graph.level_2:
                    if level_2 in kg_tree[level_1]:
                        clarify_level_1.add(level_1)

        # TODO hard coding here: drop "社保" from the candidates when it is not the only one.
        if "社保" in clarify_level_1 and len(clarify_level_1) > 1:
            clarify_level_1.remove("社保")
        # If there are more than 10 candidates, randomly sample 10.
        if len(clarify_level_1) > 10:
            clarify_level_1 = random.sample(list(clarify_level_1), 10)
        prompt: Prompt = agents.clarify.build_prompt(
            [], clarify_level_1, session.graph.to_str(), role_prompt, exclusion=refuse_config.get("exclusion", [])
        )
        # Apply the configured tone to the prompts.
        system_prompt, user_prompt = apply_tone_to_prompt(
            prompt.system_prompt, prompt.user_prompt, request.tone
        )

        response = WorkflowResponse(
            system_prompt=system_prompt,
            user_prompt=user_prompt,
            knowledge=[],
            graph=session.graph,
            status_code=Status.REASK_LEVEL_1,
        )
        session.extra["status"] = Status.REASK_LEVEL_1
        session.extra["clarify_candidates"] = list(clarify_level_1)
        await session_manager.put(session)
        return response

    # Level-2 item missing: ask a clarifying question.
    if len(session.graph.level_2) == 0:
        # If level-1 items exist, only clarify the level-2 items under them.
        clarify_level_2 = set()
        if not graph.level_1:
            clarify_level_2 = all_nodes.all_level_2
        else:
            for level_2 in all_nodes.all_level_2:
                for level_1 in graph.level_1:
                    # TODO investigate why a level_1 can be absent from kg_tree.
                    if level_1 in kg_tree and level_2 in kg_tree[level_1]:
                        clarify_level_2.add(level_2)

        if len(clarify_level_2) == 0:
            return await _direct_answer(session, request, role_prompt=role_prompt)

        # Triage the user input against the candidates.
        triage_tag = await agents.triage.run_agent(
            request.user_input, list(clarify_level_2)
        )
        if triage_tag in ["other"] and not level_2_guard:
            # return await _direct_answer(session, request, role_prompt=role_prompt)
            pass
        else:
            prompt: Prompt = agents.clarify.build_prompt(
                [], clarify_level_2, session.graph.to_str(), role_prompt, exclusion=refuse_config.get("exclusion", [])
            )
            # Apply the configured tone to the prompts.
            system_prompt, user_prompt = apply_tone_to_prompt(
                prompt.system_prompt, prompt.user_prompt, request.tone
            )

            response = WorkflowResponse(
                system_prompt=system_prompt,
                user_prompt=user_prompt,
                knowledge=[],
                graph=session.graph,
                status_code=Status.REASK_LEVEL_2,
            )
            session.extra["status"] = Status.REASK_LEVEL_2
            session.extra["clarify_candidates"] = list(clarify_level_2)
            await session_manager.put(session)
            return response

    user_history = "\n".join(session.user_history())
    summary = await agents.summary.run_agent(user_history)

    # Scene guidance: search by the (now refined) graph.
    g_knowledges = await search(
        request.tenant_id,
        request.channel_id,
        query=session.graph,
        mode="g",
        size=100,  # TODO hard coding here, assume 100 results are enough
        search_self_generated=False,
        text_query=summary,
    )

    if _need_clarify(g_knowledges):
        cur_categorys = [scene.category for scene in session.graph.scene]
        category2candidates = {}
        for knowledge in g_knowledges:
            for scene in knowledge.graph.scene:
                if scene.category not in category2candidates:
                    category2candidates[scene.category] = set()
                category2candidates[scene.category].add(scene.label)

        # Clarify the first ambiguous category not already resolved or asked.
        for category, candidates in category2candidates.items():
            if len(candidates) <= 1:
                continue

            if category in cur_categorys:
                continue

            if category in session.categories:
                continue

            session.categories.append(category)
            clarify_words = await query_clarify(
                request.tenant_id, request.channel_id, category
            )

            prompt: Prompt = agents.clarify.build_prompt(
                clarify_words,
                all_nodes.all_scene[category],
                session.graph.to_str(),
                role_prompt,
                exclusion=refuse_config.get("exclusion", []),
            )
            # Apply the configured tone to the prompts.
            system_prompt, user_prompt = apply_tone_to_prompt(
                prompt.system_prompt, prompt.user_prompt, request.tone
            )

            response = WorkflowResponse(
                system_prompt=system_prompt,
                user_prompt=user_prompt,
                knowledge=[],
                graph=session.graph,
                status_code=Status.REASK_SCENE,
            )
            session.extra["status"] = Status.REASK_SCENE
            session.extra["clarify_candidates"] = all_nodes.all_scene[category]
            session.extra["clarify_category"] = category
            await session_manager.put(session)
            return response

    user_history = "\n".join(session.user_history())
    summary = await agents.summary.run_agent(user_history)
    # do triage
    candidates = list(set(chain(*[item.graph.keywords for item in g_knowledges])))
    triage_tag = await agents.triage.run_agent(
        summary, candidates, use_empty=False, use_other=False
    )
    filtered_g_knowledges = [
        item for item in g_knowledges if triage_tag in item.graph.keywords
    ]
    if not filtered_g_knowledges:
        filtered_g_knowledges = g_knowledges

    # RAG
    if len(filtered_g_knowledges) > 0:
        information_params = {}
        calculation_params = {}
        reask_words = ""
        # The first calculator/information knowledge drives parameter extraction.
        for cur_kg in filtered_g_knowledges:
            if cur_kg.ktype == "calculator":
                calculation_params, reask_words, missing_keys = await _extraction(
                    session, cur_kg.extraction
                )
                break

            if cur_kg.ktype == "information":
                information_params, reask_words, missing_keys = await _extraction(
                    session, cur_kg.extraction
                )
                break

        if not reask_words:
            session.extra["reask_flag"] = False

        if reask_words:
            # Required parameters are still missing — re-ask the user.
            prompt: Prompt = agents.repeat.build_prompt(reask_words)
            # Apply the configured tone to the prompts.
            system_prompt, user_prompt = apply_tone_to_prompt(
                prompt.system_prompt, prompt.user_prompt, request.tone
            )

            response = WorkflowResponse(
                system_prompt=system_prompt,
                user_prompt=user_prompt,
                knowledge=[],
                graph=session.graph,
                calculation_params=calculation_params,
                status_code=Status.REASK_INFO,
            )
            session.extra["status"] = Status.REASK_INFO
            session.extra["reask_flag"] = True
            session.extra["extraction"] = cur_kg.extraction.model_dump()
            if "cached_params" not in session.extra:
                session.extra["cached_params"] = {}
            session.extra["cached_params"].update(calculation_params)
            session.extra["cached_params"].update(information_params)
            session.extra["missing_keys"] = missing_keys

        elif information_params:
            # Look up concrete information records using the extracted parameters.
            information_knowledges = await search_information(
                query=json.dumps(information_params, ensure_ascii=False),
                level_1=cur_kg.graph.level_1[0],
                tenant_id=request.tenant_id,
                channel_id=request.channel_id,
                mode="a",
                size=10,
            )
            prompt: Prompt = agents.repeat.build_prompt(
                "为您查询到如下相关信息，仅供参考"
            )
            # Apply the configured tone to the prompts.
            system_prompt, user_prompt = apply_tone_to_prompt(
                prompt.system_prompt, prompt.user_prompt, request.tone
            )

            response = WorkflowResponse(
                system_prompt=system_prompt,
                user_prompt=user_prompt,
                knowledge=[
                    DifyKnowledge.from_knowledge(k) for k in information_knowledges
                ],
                graph=session.graph,
                calculation_params=calculation_params,
                status_code=Status.INFO_ANSWER,
            )
            session.extra["status"] = Status.INFO_ANSWER
        elif (
            len(filtered_g_knowledges) <= 3
            and filtered_g_knowledges[0].matched_score == 1
            and not graph.is_empty()
            and not request.strict_mode
        ):
            # Few, perfectly-matched hits: format the stored answers directly.
            prompt: Prompt = agents.format_answer.build_prompt(
                session.history,
                "\n\n".join([item.answer for item in filtered_g_knowledges]),
            )
            # Apply the configured tone to the prompts.
            system_prompt, user_prompt = apply_tone_to_prompt(
                prompt.system_prompt, prompt.user_prompt, request.tone
            )

            response = WorkflowResponse(
                system_prompt=system_prompt,
                user_prompt=user_prompt,
                knowledge=[
                    DifyKnowledge.from_knowledge(k) for k in filtered_g_knowledges[:10]
                ],
                graph=session.graph,
                calculation_params=calculation_params,
                status_code=Status.GRAPH_ANSWER,
            )
            session.extra["status"] = Status.GRAPH_ANSWER

        else:
            prompt: Prompt = agents.rag.build_prompt(
                filtered_g_knowledges[:10],
                session.history,
                role_prompt,
                use_llm_for_unknown=request.use_llm_for_unknown,
                exclusion=refuse_config.get("exclusion", []),
            )
            # Apply the configured tone to the prompts.
            system_prompt, user_prompt = apply_tone_to_prompt(
                prompt.system_prompt, prompt.user_prompt, request.tone
            )

            response = WorkflowResponse(
                system_prompt=system_prompt,
                user_prompt=user_prompt,
                knowledge=[
                    DifyKnowledge.from_knowledge(k) for k in filtered_g_knowledges[:10]
                ],
                graph=session.graph,
                calculation_params=calculation_params,
                status_code=Status.RAG_ANSWER,
            )
            # NOTE(review): the response above reports Status.RAG_ANSWER but the session
            # status is set to GRAPH_ANSWER — confirm this mismatch is intentional.
            session.extra["status"] = Status.GRAPH_ANSWER

        await session_manager.put(session)
        return response

    return await _direct_answer(
        session, request, role_prompt=role_prompt, summary=summary
    )


@app.post("/workflow")
async def workflow(request: WorkflowRequest = fastapi.Body(...)) -> WorkflowResponse:
    return await _workflow(request)


@app.post("/callback")
async def callback(
    conversation_id: str = fastapi.Form(...), llm_response: str = fastapi.Form(...)
) -> BaseResponse:
    session = await session_manager.pop(conversation_id)
    if session.validate():
        await session_manager.put(session)
        return BaseResponse()

    session.add_history(Role.ASSISTANT, llm_response)
    await session_manager.put(session)
    return BaseResponse()


@app.post("/format")
async def format(request: QA = fastapi.Body(...)) -> QA:
    format_answer = await agents.format_answer.run_agent(
        request.question, request.answer
    )
    return QA(question=request.question, answer=format_answer)


@app.post("/rerank_similarity", response_model=RerankSimilarityResponse)
async def rerank_similarity(request: RerankSimilarityRequest):
    """
    Calculate similarity between two texts using the rerank model.
    """
    results: List[adapters.RerankResult] = await adapters.rerank(
        [request.text1], request.text2
    )
    similarity_score = results[0].relevance_score

    return RerankSimilarityResponse(similarity_score=similarity_score)


@app.post("/workflow_by_agent")
async def workflow_by_agent(
    request: WorkflowByAgentRequest = fastapi.Body(...),
) -> WorkflowResponse:
    if request.agent_name:
        assistant_info = await get_assistant_by_name(request.agent_name)
        if assistant_info is None:
            raise fastapi.HTTPException(
                status_code=404,
                detail=f"Assistant with name {request.agent_name} not found",
            )
    else:
        assistant_info = await get_assistant_by_channel_id(request.channel_id)
        if assistant_info is None:
            raise fastapi.HTTPException(
                status_code=404,
                detail=f"Assistant with channel_id {request.channel_id} not found",
            )

    workflow_request = WorkflowRequest(
        tenant_id=assistant_info.tenant_id,
        channel_id=assistant_info.channel_id,
        conversation_id=request.conversation_id,
        user_input=request.user_input,
        reset=request.reset,
        agent_name=assistant_info.agent_name,
        position=assistant_info.position,
        service=assistant_info.service,
        use_qa_as_example=request.use_qa_as_example,
        use_llm_for_unknown=request.use_llm_for_unknown,
        strict_mode=request.strict_mode,
        tone=request.tone,
    )
    response = await _workflow(workflow_request)
    return response
