'''
3.路由（Routing） 定义：根据输入内容或上下文，动态选择不同的处理路径或模型。

应用场景：
意图分类后路由：用户提问 → 判断是“客服”“技术”“销售” → 路由到对应处理模块。
多语言支持：检测语言 → 路由到对应语言的翻译或生成模型。
模型选择：简单问题用轻量模型，复杂问题用大模型。
安全过滤：敏感内容被路由到审核模块，正常内容进入生成流程。
✅ 优势：实现个性化、高效资源利用、增强系统灵活性。

'''
from typing_extensions import Literal
from langchain_core.messages import HumanMessage, SystemMessage
from typing_extensions import TypedDict
from langgraph.graph import StateGraph, START, END
from pydantic import BaseModel, Field
from utils import init_llm, set_env, visualize_graph

class State(TypedDict):
    """Shared graph state passed between the LangGraph nodes."""

    input: str  # Raw user request text.
    decision: str  # Router's verdict: "story", "joke", or "poem".
    output: str  # Final text produced by the selected worker node.


class Route(BaseModel):
    """Structured-output schema the router LLM must fill in.

    Attributes:
        step: Which worker the request should be routed to.
    """

    # Required field (Ellipsis default). The original passed `None` as the
    # default, which silently permits `Route()` with `step=None` even though
    # the annotation is a non-Optional Literal — an invalid routing decision
    # that would only fail later inside `route_decision`.
    step: Literal["poem", "story", "joke"] = Field(
        ..., description="路由过程的下一步"
    )


def llm_call_1(state: State):
    """Worker node: write a story for the user's request."""
    return {"output": llm.invoke(state["input"]).content}


def llm_call_2(state: State):
    """Worker node: write a joke for the user's request."""
    prompt = state["input"]
    response = llm.invoke(prompt)
    return {"output": response.content}


def llm_call_3(state: State):
    """Worker node: write a poem for the user's request."""
    reply = llm.invoke(state["input"])
    return {"output": reply.content}


def llm_call_router(state: State):
    """Ask the structured-output router LLM which worker should handle the input.

    Returns a state update containing the router's decision
    ("story", "joke", or "poem").
    """
    conversation = [
        SystemMessage(content="根据用户的请求将输入路由到故事、笑话或诗歌。"),
        HumanMessage(content=state["input"]),
    ]
    # `router` is the LLM wrapped with structured output, so the reply
    # is a Route instance rather than a plain message.
    verdict = router.invoke(conversation)
    return {"decision": verdict.step}


def route_decision(state: "State"):
    """Translate the router's decision into the name of the node to run next.

    Args:
        state: Graph state; reads ``state["decision"]``, expected to be
            one of "story", "joke", or "poem".

    Returns:
        The node name: "llm_call_1", "llm_call_2", or "llm_call_3".

    Raises:
        ValueError: If the decision is not one of the expected values.
            (The original silently returned None here, which surfaced
            later as a cryptic KeyError inside LangGraph's edge mapping.)
    """
    routes = {
        "story": "llm_call_1",
        "joke": "llm_call_2",
        "poem": "llm_call_3",
    }
    try:
        return routes[state["decision"]]
    except KeyError:
        raise ValueError(
            f"Unexpected routing decision: {state['decision']!r}"
        ) from None

def build_graph():
    """Assemble and compile the routing workflow as a LangGraph StateGraph."""
    builder = StateGraph(State)

    # Register the three worker nodes, then the router node.
    workers = {
        "llm_call_1": llm_call_1,
        "llm_call_2": llm_call_2,
        "llm_call_3": llm_call_3,
    }
    for node_name, node_fn in workers.items():
        builder.add_node(node_name, node_fn)
    builder.add_node("llm_call_router", llm_call_router)

    # Execution enters at the router, whose decision selects exactly
    # one worker; the branch map is identity (node name -> node name).
    builder.add_edge(START, "llm_call_router")
    builder.add_conditional_edges(
        "llm_call_router",
        route_decision,
        {node_name: node_name for node_name in workers},
    )

    # Every worker terminates the graph.
    for node_name in workers:
        builder.add_edge(node_name, END)

    return builder.compile()


if __name__ == "__main__":
    set_env()  # Configure environment variables (API keys etc.) via the project helper.
    llm = init_llm(temperature=0)  # Temperature 0 for deterministic output.
    # Wrap the LLM so replies are parsed into Route objects.
    # NOTE(review): `llm` and `router` are module globals that the node
    # functions read; they exist only when this file runs as a script, so
    # importing the module and calling the nodes directly would NameError.
    router = llm.with_structured_output(Route)
    graph = build_graph()  # Assemble and compile the routing workflow.
 
    visualize_graph(graph)

    # "/no_think" suffix: presumably a model-specific flag to disable a
    # "thinking" mode (e.g. Qwen) — TODO confirm against init_llm's model.
    state = graph.invoke({"input": "写一个关于猫的笑话/no_think"})
    print(state["output"])







