import json
import os

from langchain_core.messages import SystemMessage, HumanMessage
from langchain_openai import ChatOpenAI
from langgraph.func import task, entrypoint
from langgraph.graph import StateGraph, START, END
from typing_extensions import TypedDict

from common.ali_qwen import get_model

llm = get_model()



# Nodes of the graph: each node handles one specific route.
@task
def llm_call_1(input:str):
    """写一个故事"""
    result = llm.invoke(input=input)
    return {"output":result.content}

@task
def llm_call_2(input:str):
    """写一个笑话"""
    result = llm.invoke(input=input)
    return {"output":result.content}

@task
def llm_call_3(input:str):
    """写一首歌"""
    result = llm.invoke(input=input)
    return {"output":result.content}

def llm_call_router(input:str):
    """使用结构化输出将输入路由到适当的节点"""
    model = ChatOpenAI(
        model="qwen-plus",
        # 若没有配置环境变量，请用百炼API Key将下行替换为：api_key="sk-xxx",
        api_key="sk-965dc39b016c49ecbe29de180f4db2b6",
        # 如何获取API Key：https://help.aliyun.com/zh/model-studio/developer-reference/get-api-key
        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
        model_kwargs={"response_format": {"type":"json_object"}},
    )
    ai_msg = model.invoke(
        [
            SystemMessage(content="You are a router that directs user input to the appropriate handler.Return a"
                                  " JSON object whit 'step' key and one of these values: 'story','joke','poem'. for example:"
                                  "{'step':'joke'}"),
            HumanMessage(content=input),
        ]
    )
    decision = json.loads(ai_msg.content)
    return {"decision":decision['step']}

@entrypoint()
def router_workflow(input:str):
    next_step = llm_call_router(input)["decision"]

    if next_step == "story":
        llm_call = llm_call_1
    elif next_step == "joke":
        llm_call = llm_call_2
    elif next_step == "poem":
        llm_call = llm_call_3

    return llm_call(input)

for step in router_workflow.stream("给我写一个关于猫的笑话",stream_mode="updates"):
    print(step)
    print("\n")