import json
import os

from langchain_core.messages import SystemMessage, HumanMessage
from langchain_openai import ChatOpenAI
from langgraph.graph import StateGraph, START, END
from typing_extensions import TypedDict

from common.ali_qwen import get_model

# Shared LLM client (from the project's Ali Qwen helper) used by the
# llm_call_* handler nodes below.
llm = get_model()

class State(TypedDict):
    """Shared graph state passed between the workflow nodes."""
    input:str     # raw user request text
    decision:str  # routing label set by llm_call_router: 'story', 'joke', or 'poem'
    output:str    # generated text produced by the selected llm_call_* node

# Graph nodes: each node handles one specific routed request type.
def llm_call_1(state: State):
    """Write a story based on the user's input."""
    story = llm.invoke(input=state["input"])
    return {"output": story.content}

def llm_call_2(state: State):
    """Write a joke based on the user's input."""
    joke = llm.invoke(input=state["input"])
    return {"output": joke.content}

def llm_call_3(state: State):
    """Write a poem/song based on the user's input."""
    poem = llm.invoke(input=state["input"])
    return {"output": poem.content}

def llm_call_router(state: "State"):
    """Classify the user input and store a routing label in the state.

    Asks a JSON-mode model to label the request as 'story', 'joke', or
    'poem', then returns {"decision": <label>} for route_decision to use.

    Raises:
        json.JSONDecodeError: if the model reply is not valid JSON.
        KeyError: if the reply JSON lacks a 'step' key.
    """
    model = ChatOpenAI(
        model="qwen-plus",
        # SECURITY: never commit an API key to source control — read it from
        # the environment instead. How to get a key:
        # https://help.aliyun.com/zh/model-studio/developer-reference/get-api-key
        api_key=os.getenv("DASHSCOPE_API_KEY"),
        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
        # Force the model to emit a JSON object so json.loads below is safe.
        model_kwargs={"response_format": {"type": "json_object"}},
    )
    ai_msg = model.invoke(
        [
            # Fixed typo ("whit" -> "with") and made the example valid JSON
            # (double quotes), since JSON-object output is requested.
            SystemMessage(content="You are a router that directs user input to the appropriate handler. Return a"
                                  " JSON object with a 'step' key and one of these values: 'story','joke','poem'."
                                  ' For example: {"step": "joke"}'),
            HumanMessage(content=state["input"]),
        ]
    )
    decision = json.loads(ai_msg.content)
    return {"decision": decision["step"]}

def route_decision(state: "State"):
    """Return the name of the handler node for the router's decision.

    Args:
        state: workflow state whose 'decision' key holds the routing
            label produced by llm_call_router.

    Returns:
        The node name: 'llm_call_1' (story), 'llm_call_2' (joke),
        or 'llm_call_3' (poem).

    Raises:
        ValueError: if 'decision' is not one of the known labels.
            (The original code silently returned None here, which made
            the graph fail later with an obscure error.)
    """
    routes = {
        "story": "llm_call_1",
        "joke": "llm_call_2",
        "poem": "llm_call_3",
    }
    decision = state["decision"]
    if decision not in routes:
        raise ValueError(f"Unknown routing decision: {decision!r}")
    return routes[decision]


# Assemble the routing workflow: START -> router -> one of three handlers -> END.
router_builder = StateGraph(State)

# Handler nodes, keyed by the node name route_decision returns.
handlers = {
    "llm_call_1": llm_call_1,
    "llm_call_2": llm_call_2,
    "llm_call_3": llm_call_3,
}
for node_name, node_fn in handlers.items():
    router_builder.add_node(node_name, node_fn)
router_builder.add_node("llm_call_router", llm_call_router)

router_builder.add_edge(START, "llm_call_router")
# route_decision already returns the target node name, so the
# conditional-edge mapping is the identity over the handler names.
router_builder.add_conditional_edges(
    "llm_call_router",
    route_decision,
    {name: name for name in handlers},
)
for node_name in handlers:
    router_builder.add_edge(node_name, END)

workflow = router_builder.compile()
state = workflow.invoke({"input": "给我写一个关于猫的笑话"})
print(state["output"])