import json
import uuid
from typing import Optional, List, Dict, Any

import uvicorn
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from langchain_core.messages import HumanMessage, ToolMessage, AIMessage

# Import the compiled LangGraph application
from my_hitl_app.graph import app_graph

# FastAPI application instance
api = FastAPI(
    description="一个统一的/chat路由，处理需要人工审批的复杂任务",
    title="Stateful Chat Service with Human-in-the-Loop",
)


# Pydantic models for the request and response bodies
class ChatRequest(BaseModel):
    """Request body for POST /chat.

    question: the user's message; when resuming a paused conversation this
        carries 'approve', 'reject', or modified instructions.
    thread_id: id of an existing conversation thread; omit (None) to start
        a new conversation with a generated id.
    """

    question: str
    thread_id: Optional[str] = None


class ChatResponse(BaseModel):
    """Response body for POST /chat.

    thread_id: thread id to send back on follow-up requests.
    status: 'finished' when the graph ran to completion, or
        'paused_for_approval' when it stopped at an interrupt.
    response: final answer text, or an instruction prompt when paused.
    tool_calls: the pending tool calls awaiting approval, if paused.
    """

    thread_id: str
    status: str  # 'finished', 'paused_for_approval'
    response: Optional[str] = None
    tool_calls: Optional[List[Dict[str, Any]]] = None


@api.post("/chat", response_model=ChatResponse)
def chat(request: ChatRequest):
    """
    Handle one chat turn. Starts a new conversation, or resumes an existing
    one that is paused for human approval.

    - First request: supply 'question' only; a new thread_id is generated.
    - Approval follow-up: supply 'thread_id' plus 'question' containing
      'approve', 'reject', or modified instructions.

    Raises:
        HTTPException 404: a thread_id was supplied but no state exists.
        HTTPException 400: a rejection was sent but no tool call is pending.
    """
    thread_id = request.thread_id or str(uuid.uuid4())
    config = {"configurable": {"thread_id": thread_id}}

    print(f"Chat thread id: {thread_id}")

    # Look up any checkpointed state for this thread.
    current_state = app_graph.get_state(config)
    # BUG FIX: get_state returns a StateSnapshot (a NamedTuple), which is
    # always truthy, so the original `not current_state` guard could never
    # fire. An unknown thread shows up as a snapshot with empty values.
    if request.thread_id and not (current_state and current_state.values):
        raise HTTPException(status_code=404, detail="会话未找到或已过期")

    # A non-empty `next` on the snapshot means the graph is paused at an
    # interrupt point and waiting for human input.
    is_interrupted = bool(current_state and current_state.next)

    if is_interrupted:
        # --- Case A: conversation is paused, awaiting approve/reject ---
        if request.question.lower() == "approve":
            print("--- User approved. Resuming with invoke(None, config) ---")
            # Input None tells LangGraph to execute the pending node (the
            # 'tools' node) from exactly where it was interrupted.
            output_state = app_graph.invoke(None, config)

        else:  # User rejected or supplied modified instructions.
            print("--- User rejected/modified. Resuming with feedback ToolMessage ---")
            # Inject ToolMessages telling the agent its request was rejected,
            # so it can re-plan based on the user's feedback. The pending
            # tool_calls live on the last AI message in the checkpoint.
            last_message = current_state.values["messages"][-1]
            if not isinstance(last_message, AIMessage) or not last_message.tool_calls:
                raise HTTPException(status_code=400, detail="No pending tool calls to reject.")

            rejection_message = f"用户拒绝了此操作。用户反馈: '{request.question}'"

            # Every rejected tool call needs a matching ToolMessage (keyed by
            # tool_call_id) or the model API will reject the transcript.
            tool_messages = [
                ToolMessage(content=rejection_message, tool_call_id=call["id"])
                for call in last_message.tool_calls
            ]

            # Feed the rejections back in so the agent can decide again.
            output_state = app_graph.invoke({"messages": tool_messages}, config)

    else:
        # --- Case B: normal (non-interrupted) flow, possibly a new thread ---
        print("--- Starting new conversation or continuing non-interrupted flow ---")
        initial_input = {"messages": [HumanMessage(content=request.question)]}
        output_state = app_graph.invoke(initial_input, config)

    # --- Build the response from the state AFTER this run ---
    # BUG FIX: invoke() returns the graph's *values* dict, which never
    # contains a 'next' key, so the old `output_state.get("next")` check
    # always reported 'finished' even when the graph paused again. Re-read
    # the checkpointed snapshot to detect a pending interrupt.
    post_run_state = app_graph.get_state(config)
    if not post_run_state.next:
        # Graph ran to completion: return the last message's text.
        final_answer = output_state["messages"][-1].content
        return ChatResponse(
            thread_id=thread_id,
            status="finished",
            response=final_answer
        )
    else:
        # Graph interrupted (again): surface the pending tool calls.
        last_message = output_state["messages"][-1]
        return ChatResponse(
            thread_id=thread_id,
            status="paused_for_approval",
            response="Agent想要执行以下工具，请输入 'approve' 或 'reject' (可附带理由)。",
            tool_calls=last_message.tool_calls,
        )


if __name__ == "__main__":
    # Local entry point: announce readiness, then serve on all interfaces.
    startup_notes = (
        "API服务已准备就绪。",
        "在浏览器中打开 http://127.0.0.1:8000/docs 查看API文档。",
    )
    for note in startup_notes:
        print(note)
    uvicorn.run(api, host="0.0.0.0", port=8000)
