# langgraph_ollama_csv_pg.py (最终修正版)
from typing import Annotated, Sequence, TypedDict, Dict, Any
import operator

from langchain_ollama import ChatOllama
from langchain_core.messages import BaseMessage, HumanMessage, ToolMessage, AIMessage
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.agents import create_tool_calling_agent, AgentExecutor # ✅ 导入 AgentExecutor
from langgraph.graph import StateGraph, END

# --- 导入工具 ---
from database_tools import tools, CSV_FILE_PATH

# --- 创建 Ollama LLM ---
# Chat model served by a local Ollama instance.
# Alternative models: llama3.1 or qwen:7b.
llm = ChatOllama(
    base_url="http://localhost:11434",  # default Ollama endpoint
    model="qwen3:8b",
    temperature=0,  # deterministic output for data-analysis answers
)

# --- 创建 Agent 和 Executor ---
# Prompt for the tool-calling agent.
# BUG FIX: the original prompt had no ("human", "{input}") message, so the
# "input" value passed by AgentExecutor.invoke() never reached the model —
# the LLM only ever saw the system message. create_tool_calling_agent
# requires the agent_scratchpad placeholder; the human slot carries the
# user's question.
prompt = ChatPromptTemplate.from_messages([
    ("system", """You are a data analyst assistant... (your system message here) /nothink"""),
    ("human", "{input}"),
    MessagesPlaceholder(variable_name="agent_scratchpad"),
])

# 1. Build the agent (a Runnable that plans tool calls).
agent_runnable = create_tool_calling_agent(llm, tools, prompt)

# 2. AgentExecutor wraps the runnable and drives the think/act loop,
#    executing tool calls until the agent produces a final answer.
agent_executor = AgentExecutor(agent=agent_runnable, tools=tools, verbose=False)  # verbose=True for more logs

# --- 定义状态 ---
# ✅ 3. 更新状态定义，更通用，兼容 AgentExecutor
class AgentState(TypedDict):
    """Shared LangGraph state dict passed between graph nodes."""
    # Conversation history. The operator.add annotation tells LangGraph to
    # APPEND each node's returned messages instead of replacing the list.
    messages: Annotated[Sequence[BaseMessage], operator.add]
    # Which node produced the latest update ("user", "agent", or "tool").
    sender: str
    # AgentExecutor may need other fields, but messages is usually enough.
    # intermediate_steps could be added if needed, though it is typically unnecessary.

def call_model(state: AgentState) -> dict:
    """Run the AgentExecutor on the most recent user question.

    Scans the message history backwards for the latest HumanMessage,
    feeds its content to the executor, and wraps the answer (or the
    error text) in an AIMessage appended to the state.
    """
    # Locate the newest user question; fall back to an empty string
    # when no HumanMessage is present in the history.
    question = next(
        (m.content for m in reversed(state["messages"]) if isinstance(m, HumanMessage)),
        "",
    )

    # AgentExecutor expects the user text under the "input" key.
    # NOTE: do not add a "callbacks" key here — it is not an input variable.
    executor_input = {"input": question}

    try:
        result = agent_executor.invoke(executor_input)
        return {"messages": [AIMessage(content=result["output"])], "sender": "agent"}
    except Exception as exc:
        # Surface failures as a normal AI reply so the graph keeps running.
        return {
            "messages": [AIMessage(content=f"Agent execution failed: {str(exc)}")],
            "sender": "agent",
        }

# --- 工具执行节点保持不变 ---
def execute_tool(state: AgentState) -> dict:
    """Execute the tool calls requested by the last message.

    BUG FIXES vs. the original:
    - ALL tool calls on the message are executed, not just the first one;
      dropping calls left dangling tool_call_ids, which breaks the
      tool-calling protocol on the next model turn.
    - An unknown tool name now also yields an error ToolMessage instead of
      silently returning nothing for that tool_call_id.

    Returns a state update with one ToolMessage per tool call.
    """
    messages = state['messages']
    last_message = messages[-1]

    # No pending tool calls: nothing to do.
    if not (hasattr(last_message, 'tool_calls') and last_message.tool_calls):
        return {"messages": [], "sender": "tool"}

    # Build the lookup once instead of scanning `tools` per call.
    tools_by_name = {t.name: t for t in tools}

    tool_messages = []
    for tool_call in last_message.tool_calls:
        tool_name = tool_call['name']
        tool_args = tool_call['args']

        selected_tool = tools_by_name.get(tool_name)
        if selected_tool is None:
            content = f"Error: tool '{tool_name}' not found."
        else:
            try:
                content = str(selected_tool.invoke(tool_args))
            except Exception as e:
                content = f"Error executing tool {tool_name}: {str(e)}"

        # Every tool_call_id must get a ToolMessage, even on failure.
        tool_messages.append(
            ToolMessage(content=content, name=tool_name, tool_call_id=tool_call['id'])
        )

    return {"messages": tool_messages, "sender": "tool"}

# --- 构建图 ---
def should_call_tool(state: AgentState) -> str:
    """Route to the tool executor when the newest message carries tool calls."""
    messages = state['messages']
    if not messages:
        return END
    # getattr covers messages without a tool_calls attribute (e.g. ToolMessage).
    return "tool_executor" if getattr(messages[-1], 'tool_calls', None) else END

# --- Assemble the graph: agent -> (tools -> agent)* -> END ---
workflow = StateGraph(AgentState)
workflow.add_node("agent", call_model)
workflow.add_node("tool_executor", execute_tool)
workflow.set_entry_point("agent")
workflow.add_conditional_edges(
    "agent",
    should_call_tool,
    {"tool_executor": "tool_executor", END: END},
)
# After a tool runs, hand its result back to the agent.
workflow.add_edge("tool_executor", "agent")
app = workflow.compile()

# --- 运行示例 ---
# --- Interactive REPL ---
if __name__ == "__main__":
    print("Data Analyst Assistant is ready! (Using langchain-ollama & AgentExecutor)")
    print(f"CSV File Path: {CSV_FILE_PATH}")
    print("Ask questions about the data...")

    while True:
        # BUG FIX: input() raises EOFError on Ctrl-D (and KeyboardInterrupt
        # on Ctrl-C), which previously crashed with a traceback; exit cleanly.
        try:
            user_input = input("\nYou: ")
        except (EOFError, KeyboardInterrupt):
            break
        if user_input.lower() in ['quit', 'exit', 'bye']:
            break

        # Seed the graph state with the user's question.
        initial_state = {"messages": [HumanMessage(content=user_input)], "sender": "user"}

        try:
            # Stream node-by-node outputs as the graph executes.
            for output in app.stream(initial_state):
                for key, value in output.items():
                    if key == '__end__':
                        continue
                    for msg in value.get("messages", []):
                        if isinstance(msg, AIMessage):
                            if hasattr(msg, 'tool_calls') and msg.tool_calls:
                                call = msg.tool_calls[0]
                                print(f"🛠️  Calling: {call['name']}({call['args']})")
                            else:
                                print(f"💬 AI: {msg.content}")
                        elif isinstance(msg, ToolMessage):
                            print(f"🔧 Result: {msg.content}")
        except Exception as e:
            print(f"❌ Error in main loop: {e}")