import os
from dotenv import load_dotenv
from typing import TypedDict, Literal

from langchain_openai import ChatOpenAI
from langgraph.graph import StateGraph, END

# Load environment variables (API key, OPENAI_BASE_URL, ...) from a .env file
load_dotenv()

# 1. Define the state (State)
# The state is extended with 'classification' to hold the routing decision,
# and 'response' to hold the final output (either the joke or the explanation).
class GraphState(TypedDict):
    """
    Represents the state of our graph.

    Attributes:
        topic: The user's input topic.
        classification: The classification of the topic ('joke' or 'explanation').
        response: The final generated response.
    """
    topic: str
    classification: Literal["joke", "explanation"]
    response: str

# 2. 定义节点 (Nodes)
llm = ChatOpenAI(
    model="qwen-plus-latest", base_url=os.getenv("OPENAI_BASE_URL")
)

def route_topic(state: GraphState) -> dict[str, str]:
    """
    Decides whether to generate a joke or an explanation for the topic.

    Args:
        state: The current graph state.

    Returns:
        A dictionary with the classification.
    """
    print("---CLASSIFYING TOPIC---")
    topic = state["topic"]
    
    prompt = f"""Given the user's topic, classify it as either 'joke' or 'explanation'. 
Do not respond with more than one word.

Topic: {topic}
Classification:"""
    
    response = llm.invoke(prompt)
    classification = response.content.strip().lower()
    print(f"---CLASSIFICATION: {classification}---")
    
    return {"classification": classification}

def generate_joke(state: GraphState) -> dict[str, str]:
    """
    Generates a joke about the topic.
    """
    print("---GENERATING JOKE---")
    topic = state["topic"]
    prompt = f"Tell me a short joke about {topic}"
    joke = llm.invoke(prompt).content
    print(f"---JOKE: {joke}---")
    return {"response": joke}

def generate_explanation(state: GraphState) -> dict[str, str]:
    """
    Generates a brief explanation of the topic.
    """
    print("---GENERATING EXPLANATION---")
    topic = state["topic"]
    prompt = f"Provide a brief, one-paragraph explanation of {topic}"
    explanation = llm.invoke(prompt).content
    print(f"---EXPLANATION: {explanation}---")
    return {"response": explanation}

# 3. Define the conditional-edge logic
def should_i_tell_joke_or_explain(state: GraphState) -> Literal["joke_node", "explanation_node"]:
    """
    This is the conditional edge. It returns the name of the next node to call.
    """
    print("---MAKING DECISION---")
    if state["classification"] == "joke":
        return "joke_node"
    else:
        return "explanation_node"

# 4. 构建图
workflow = StateGraph(GraphState)

# 添加节点
workflow.add_node("router", route_topic)
workflow.add_node("joke_node", generate_joke)
workflow.add_node("explanation_node", generate_explanation)

# 设置入口点
workflow.set_entry_point("router")

# 添加条件边
workflow.add_conditional_edges(
    "router",
    should_i_tell_joke_or_explain,
    {
        "joke_node": "joke_node",
        "explanation_node": "explanation_node",
    }
)

# 连接分支到终点
workflow.add_edge("joke_node", END)
workflow.add_edge("explanation_node", END)

# 编译图
app = workflow.compile()

# 5. 运行图
if __name__ == "__main__":
    print("---Running with a 'funny' topic---")
    inputs = {"topic": "dogs"}
    result = app.invoke(inputs)
    print("\nFinal Response:")
    print(result["response"])

    print("\n" + "="*50 + "\n")

    print("---Running with a 'serious' topic---")
    inputs = {"topic": "black holes"}
    result = app.invoke(inputs)
    print("\nFinal Response:")
    print(result["response"]) 