import uvicorn
from fastapi import FastAPI, HTTPException, Request
from fastapi.responses import JSONResponse
from langchain_core.messages import HumanMessage
import uuid

from human_in_loop.my_graph import my_graph

app_fastapi = FastAPI()

# Maps an API-level session id to the LangGraph thread id backing it.
# NOTE: in production this session_id -> thread_id mapping must be persisted
# (database / Redis); an in-process dict is lost on restart and is not
# shared across workers.
active_sessions = {}  # {session_id: thread_id}


@app_fastapi.post("/chat")
async def chat_with_ai(request: Request):
    """Chat endpoint with human-in-the-loop support.

    Expects a JSON body with:
      - ``message``: the user's message (required).
      - ``session_id``: an existing session to resume (optional). When
        omitted, a new session and LangGraph thread are created.

    Returns a JSON payload whose ``status`` is either ``"completed"`` or
    ``"human_input_required"``; the latter means the graph is paused at a
    human-intervention checkpoint and the caller should POST again with the
    same ``session_id`` carrying the human reply.

    Raises:
        HTTPException: 400 for malformed JSON or a missing message,
            404 for an unknown session, 500 for graph failures.
    """
    try:
        data = await request.json()
    except ValueError:
        # Malformed JSON is a client error, not an unhandled 500.
        raise HTTPException(status_code=400, detail="Request body must be valid JSON.")

    user_message_content = data.get("message")
    session_id = data.get("session_id")

    if not user_message_content:
        raise HTTPException(status_code=400, detail="Message content is required.")

    if not session_id:
        return _start_new_session(user_message_content)
    return _resume_session(session_id, user_message_content)


def _start_new_session(user_message_content):
    """Create a fresh session/thread pair and run the graph from scratch."""
    session_id = str(uuid.uuid4())
    thread_id = str(uuid.uuid4())  # LangGraph-internal thread id
    active_sessions[session_id] = thread_id

    # Seed state: the user's first message, with user_input cleared.
    initial_state = {
        "messages": [HumanMessage(content=user_message_content)],
        "user_input": "",
    }
    config = {"configurable": {"thread_id": thread_id}}

    try:
        # invoke() runs the graph until an interrupt checkpoint or END.
        result = my_graph.invoke(initial_state, config=config)
        return _result_response(session_id, thread_id, result)
    except Exception as e:
        # LangGraph interrupts normally pause rather than raise; if we land
        # here, distinguish "paused at a checkpoint" from a genuine failure.
        return _recover_from_checkpoint(
            session_id, thread_id, config, e,
            stage="initial", detail="AI processing error",
        )


def _resume_session(session_id, user_message_content):
    """Resume an existing session's graph with the human's reply."""
    thread_id = active_sessions.get(session_id)
    if not thread_id:
        raise HTTPException(status_code=404, detail="Session not found or expired.")

    config = {"configurable": {"thread_id": thread_id}}

    # Fold the human reply into the checkpointed state: append it to the
    # message history and expose it through the user_input field, which the
    # graph's human-intervention node reads.
    current_langgraph_state = my_graph.get_state(config)
    updated_state = {
        "messages": current_langgraph_state.values.get("messages", [])
        + [HumanMessage(content=user_message_content)],
        "user_input": user_message_content,
    }

    try:
        # invoke() continues from the last checkpoint of this thread.
        result = my_graph.invoke(updated_state, config=config)
        return _result_response(session_id, thread_id, result)
    except Exception as e:
        return _recover_from_checkpoint(
            session_id, thread_id, config, e,
            stage="resuming", detail="AI resuming error",
        )


def _result_response(session_id, thread_id, result):
    """Translate a graph invocation result into the API response.

    NOTE(review): the graph is assumed to signal a pending human step by
    embedding "human_input_required" in its "user_input" state field --
    confirm against my_graph's human-intervention node.
    """
    # Guard: the key may be present but hold None; `.get` defaults only
    # apply to a *missing* key, and `"x" in None` would raise TypeError.
    user_input = result.get("user_input") or ""
    if "human_input_required" in user_input:
        return JSONResponse({
            "session_id": session_id,
            "status": "human_input_required",
            "ai_message": result["messages"][-1].content,  # the AI's question
            "thread_id": thread_id,  # returned so the client can resume
        })
    return JSONResponse({
        "session_id": session_id,
        "status": "completed",
        "ai_message": result["messages"][-1].content,
    })


def _recover_from_checkpoint(session_id, thread_id, config, exc, stage, detail):
    """Exception fallback shared by both paths.

    If the thread's checkpoint shows a pending next node, the graph is
    merely paused waiting for human input; otherwise surface a 500.
    """
    current_state = my_graph.get_state(config)
    if current_state and current_state.next:  # paused, not broken
        return JSONResponse({
            "session_id": session_id,
            "status": "human_input_required",
            "ai_message": current_state.values["messages"][-1].content,
            "thread_id": thread_id,
        })
    print(f"Error during {stage} graph execution: {exc}")
    raise HTTPException(status_code=500, detail=f"{detail}: {exc}")


if __name__ == "__main__":
    # Run the development server. The app is passed as an import string
    # ("module:attribute") rather than as an object because reload=True
    # needs uvicorn to re-import the application after a code change.
    uvicorn.run(
        "server:app_fastapi",
        host="0.0.0.0",
        port=8000,
        reload=True,
    )
