import ast
import operator
import os
from typing import Annotated, List, TypedDict

from langchain.agents import create_agent
from langchain_core.messages import BaseMessage, HumanMessage, SystemMessage
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import END, START, StateGraph
from langgraph.graph.message import add_messages

# ============================================================================
# 0. 环境和模型设置 (Setup)
# ============================================================================

# Qwen-plus served through Alibaba Cloud's OpenAI-compatible endpoint.
# SECURITY FIX: the original hard-coded a live API key in source control.
# Read it from the environment instead (export DASHSCOPE_API_KEY=...).
llm = ChatOpenAI(
    model="qwen-plus",
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1/",
    api_key=os.environ.get("DASHSCOPE_API_KEY", ""),
    temperature=0,   # deterministic output for stable routing decisions
    streaming=True,  # required for token-level "messages" streaming in __main__
)

# Shared state flowing through every node of the supervisor graph.
class AgentState(TypedDict):
    """Shared graph state passed between the supervisor and worker nodes."""

    # Full conversation history; the `add_messages` reducer appends new
    # messages to the list instead of overwriting it.
    messages: Annotated[List[BaseMessage], add_messages]
    # Routing decision written by `supervisor_router`: "research", "math",
    # or "FINISH" (consumed by the conditional edge).
    next: str

# ============================================================================
# 1. 定义底层工具 (Low-Level Tools)
# ============================================================================

@tool
def tavily_search(query: str) -> str:
    """搜索互联网以获取最新信息。"""
    # NOTE: the docstring above doubles as the tool description shown to the
    # LLM, so it is kept verbatim. This is a stub implementation: it logs the
    # call and echoes the query back instead of performing a real web search.
    print(f"--- 调用工具: tavily_search(query='{query}') ---")
    return f"检索结果：关于 '{query}' 的最新信息。"

# Whitelist of AST operator nodes allowed in arithmetic expressions.
_ALLOWED_OPS = {
    ast.Add: operator.add,
    ast.Sub: operator.sub,
    ast.Mult: operator.mul,
    ast.Div: operator.truediv,
    ast.FloorDiv: operator.floordiv,
    ast.Mod: operator.mod,
    ast.Pow: operator.pow,
    ast.USub: operator.neg,
    ast.UAdd: operator.pos,
}

def _safe_eval(expression: str) -> float:
    """Evaluate a pure-arithmetic expression without eval().

    Accepts numeric literals, parentheses, and the binary/unary operators in
    `_ALLOWED_OPS`. Raises ValueError (or SyntaxError from ast.parse) for
    anything else — names, calls, attribute access, etc.
    """
    def _eval(node: ast.AST) -> float:
        if isinstance(node, ast.Expression):
            return _eval(node.body)
        if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
            return node.value
        if isinstance(node, ast.BinOp) and type(node.op) in _ALLOWED_OPS:
            return _ALLOWED_OPS[type(node.op)](_eval(node.left), _eval(node.right))
        if isinstance(node, ast.UnaryOp) and type(node.op) in _ALLOWED_OPS:
            return _ALLOWED_OPS[type(node.op)](_eval(node.operand))
        raise ValueError(f"Unsupported expression node: {type(node).__name__}")

    return float(_eval(ast.parse(expression, mode="eval")))

@tool
def calculate(expression: str) -> float:
    """执行基本的数学计算。输入是Python数学表达式字符串。"""
    # SECURITY FIX: the original called eval() on LLM-generated text, which
    # executes arbitrary Python. _safe_eval only permits arithmetic.
    print(f"--- 调用工具: calculate(expression='{expression}') ---")
    try:
        return _safe_eval(expression)
    except Exception:
        # Preserve the original contract: any invalid expression yields 0.0.
        return 0.0

# ============================================================================
# 2. 创建工人代理 (Worker Agents)
# ============================================================================

# --- 2.1 Research agent ---
# System prompt is runtime behavior (sent to the LLM) and is kept verbatim:
# it instructs the agent to use 'tavily_search' and return a concise answer.
RESEARCH_PROMPT = (
    "你是一位专业的研究专家。你的任务是利用 'tavily_search' 工具获取最新信息。"
    "最后，返回一个简明、信息丰富的答案。"
)
# ReAct-style worker agent with a single search tool; used as a graph node below.
research_agent = create_agent(llm, [tavily_search], system_prompt=RESEARCH_PROMPT)

# --- 2.2 Math agent ---
# System prompt is runtime behavior (sent to the LLM) and is kept verbatim:
# it forces the agent to delegate all numeric work to the 'calculate' tool.
MATH_PROMPT = (
    "你是一位数学专家。你必须使用 'calculate' 工具进行任何数字运算。"
    "只返回最终的数字结果或计算确认。"
)
# Worker agent with a single calculator tool; used as a graph node below.
math_agent = create_agent(llm, [calculate], system_prompt=MATH_PROMPT)

# ============================================================================
# 3. 创建主管图：路由节点 (Supervisor Router)
# ============================================================================

# Routing prompt for the supervisor LLM. Runtime behavior — kept verbatim.
# It asks the model to reply with one of: 'research', 'math', or 'FINISH',
# which supervisor_router() then matches by substring.
SUPERVISOR_PROMPT = (
    "你是一个中央任务调度主管，负责协调研究代理和数学代理来完成用户请求。"
    "你的任务是根据**完整的消息历史**，决定下一步应该将任务分配给哪个专业代理。"
    "【重要判断】如果最新消息是**专业代理返回的最终答案或总结**，且已充分回答用户问题，请返回 'FINISH'。"
    "如果请求需要查询外部信息，请返回 'research'。"
    "如果请求需要进行计算或数学操作，请返回 'math'。"
    "\n\n可用的代理名称: research, math"
)

def _normalize_decision(message) -> str:
    """Lower-case and strip the raw LLM reply so substring routing is robust."""
    return message.content.strip().lower()

# LCEL pipeline: model call followed by the plain-text normalizer above.
supervisor_chain = llm | _normalize_decision

def supervisor_router(state: AgentState) -> dict:
    """Ask the LLM where to route next and write the choice to state['next'].

    Returns {"next": "research" | "math" | "FINISH"}; the conditional edge
    in the graph maps these values onto the worker nodes or END.
    """
    print("--- 节点: supervisor (决策中...) ---")
    # Prepend the routing instructions to the full conversation so far.
    conversation = [SystemMessage(content=SUPERVISOR_PROMPT), *state["messages"]]
    decision = supervisor_chain.invoke(conversation)
    print(f"\n--- 主管决策: {decision} ---")

    # Substring match against the (lower-cased) reply; 'research' wins ties,
    # mirroring the original if/elif ordering. Anything else means FINISH.
    for worker in ("research", "math"):
        if worker in decision:
            return {"next": worker}
    return {"next": "FINISH"}

# ============================================================================
# 4. 构建 LangGraph 流程图 (Build the LangGraph)
# ============================================================================

# Parent (supervisor) graph over the shared AgentState.
supervisor_builder = StateGraph(AgentState)

# --- 4.1 Nodes ---
supervisor_builder.add_node("supervisor", supervisor_router)
supervisor_builder.add_node("research", research_agent) # compiled agents plug in directly as nodes
supervisor_builder.add_node("math", math_agent)

# --- 4.2 Edges and conditional routing ---
supervisor_builder.add_edge(START, "supervisor")

# Route on the 'next' value written by supervisor_router; FINISH terminates.
supervisor_builder.add_conditional_edges(
    "supervisor",
    lambda state: state["next"], 
    {"research": "research", "math": "math", "FINISH": END,},
)

# Workers always hand control back to the supervisor for the next decision.
supervisor_builder.add_edge("research", "supervisor")
supervisor_builder.add_edge("math", "supervisor")

# --- 4.3 Compile with an in-memory checkpointer (per-thread conversation state) ---
app = supervisor_builder.compile(checkpointer=MemorySaver())


# ============================================================================
# 5. 运行交互式循环 (Run Interactive Loop) - 修正调试输出
# ============================================================================
if __name__ == "__main__":

    print("="*30)
    print(" 欢迎使用 LangGraph 多代理主管系统 (DEBUG 模式)")
    print(" 流程调试信息 (Updates) 已开启")
    print("="*30)

    # Single thread_id so MemorySaver persists the conversation across turns.
    config = {"configurable": {"thread_id": "main_chat_thread_streaming"}}

    while True:
        try:
            user_input = input("\n👤 你: ")
            if user_input.lower() in ["exit", "退出"]:
                print("🤖 助手: 再见！")
                break
            if not user_input.strip():
                continue

            inputs = {"messages": [HumanMessage(content=user_input)]}
            print("\n🤖 助手 (流式输出): ", end="", flush=True)

            final_response_message = None

            # Stream both LLM tokens ("messages") and per-node state deltas
            # ("updates") in a single pass.
            stream_iterator = app.stream(
                inputs,
                config=config,
                stream_mode=["updates", "messages"]
            )

            print("\n--- LangGraph 流程追踪 (Updates) ---")

            # With multiple stream modes active, each item is a (mode, data) pair.
            for mode, chunk_data in stream_iterator:

                # A. "messages" stream — chunk_data is (message_chunk, metadata);
                #    print tokens as they arrive for a typewriter effect.
                if mode == "messages":
                    message_chunk = chunk_data[0]
                    if message_chunk.content:
                        print(message_chunk.content, end="", flush=True)

                    # Accumulate the full assistant reply across chunks.
                    if final_response_message:
                        final_response_message.content += message_chunk.content
                    else:
                        final_response_message = message_chunk

                # B. "updates" stream — chunk_data maps node name -> state delta
                #    that node returned (e.g. the supervisor's {'next': ...}).
                elif mode == "updates":
                    for node_name, update_data in chunk_data.items():
                        # Skip the virtual START/END markers; show real nodes only.
                        if node_name not in [START, END]:
                            print(f"\n[DEBUG] 节点 '{node_name}' 返回状态: {update_data}")

            print("\n" + "="*50)

        except KeyboardInterrupt:
            print("\n🤖 助手: 强制退出。再见！")
            break
        except Exception as e:
            # Report the error but keep the REPL alive for the next turn.
            print(f"\n发生错误: {e}")
    # BUGFIX: the original file contained a second, near-verbatim copy of this
    # entire interactive loop after the except block, so users had to type
    # "exit" twice to quit. The duplicate has been removed.