from typing import TypedDict, Annotated, Literal
from langgraph.graph import StateGraph, END
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
import uuid
from typing import Dict, Optional

# Global in-memory store: session_id -> (compiled graph, paused state).
# Simulates running graph instances; use Redis or similar in production.
RUNNING_GRAPHS: Dict[str, tuple] = {}

# Shared graph state passed between all nodes.
class State(TypedDict):
    input: str                     # raw user input that started the session
    ai_response: str               # AI-generated reply awaiting human review
    human_feedback: Optional[str]  # None while paused; filled in via the /feedback endpoint
    approved: bool                 # True once a human approves the AI response

# Node functions
def generate_ai_response(state: State) -> State:
    """Produce a canned AI reply for the user's input (stand-in for a real LLM call).

    Returns a partial state update: the generated reply plus approved=False,
    since the reply has not been reviewed yet.
    """
    return {
        "ai_response": f"AI 建议回复: '{state['input']}' 的回应是 'Hello!'",
        "approved": False,
    }

def await_human_feedback(state: State) -> dict:
    """Pause point for human review.

    If feedback has already been injected (via the /feedback endpoint), pass
    the state through UNCHANGED so the conditional edge routes to "process".
    Otherwise normalize human_feedback to None, which the conditional edge
    treats as "pause here" by routing to END.
    """
    if state.get("human_feedback") is not None:
        # Bug fix: this previously overwrote the submitted feedback with the
        # literal string "True", so process_feedback could never see an
        # "approve..." value and every session was rejected.
        return state
    # No feedback yet: ensure the key exists as None and stop at this node.
    state["human_feedback"] = None
    return state

def process_feedback(state: State) -> State:
    """Turn the human feedback into an approval decision.

    Feedback starting with "approve" approves the AI response; anything else
    — including a missing or None value — rejects it and replaces the
    response with a termination message.
    """
    # Bug fix: state["human_feedback"] raised KeyError when the key was
    # absent and AttributeError when it was None; .get() + None guard makes
    # the rejection path the safe default.
    feedback = state.get("human_feedback")
    if feedback is not None and feedback.startswith("approve"):
        return {"approved": True}
    # Rejected (or no usable feedback): terminate the flow.
    return {"approved": False, "ai_response": "已拒绝，流程终止"}

# Graph construction
def create_graph():
    """Build and compile the review workflow: generate -> await_feedback -> process.

    The conditional edge after "await_feedback" routes to "process" once
    human feedback is present, and to END (i.e. pauses the run) otherwise.
    """

    def route_after_feedback(state: State):
        # Feedback present -> continue to "process"; otherwise stop here.
        return "process" if state.get("human_feedback") is not None else "__end__"

    builder = StateGraph(State)
    builder.add_node("generate", generate_ai_response)
    builder.add_node("await_feedback", await_human_feedback)
    builder.add_node("process", process_feedback)

    builder.set_entry_point("generate")
    builder.add_edge("generate", "await_feedback")
    builder.add_conditional_edges(
        "await_feedback",
        route_after_feedback,
        {"process": "process", "__end__": END},
    )
    builder.add_edge("process", END)

    return builder.compile()

# FastAPI application exposing the /start and /feedback endpoints.
app = FastAPI()

class HumanFeedbackRequest(BaseModel):
    """Request body for /feedback: identifies the paused session and carries the reviewer's decision."""
    session_id: str
    feedback: str  # e.g. "approve" or "reject: <revised text>"

# user_input = "Nihao"
#
# session_id = str(uuid.uuid4())
# graph = create_graph()
# # 初始状态
# initial_state = {"input": user_input, "ai_response": "", "human_feedback": None, "approved": False}
# # 执行到第一个暂停点（即 await_feedback）
# result = graph.invoke(initial_state)
# print(result)
# # 如果 result 中 human_feedback 仍为 None，说明已暂停
# RUNNING_GRAPHS[session_id] = (graph, result)
#
# graph, current_state = RUNNING_GRAPHS[session_id]
# # 注入人工反馈
# updated_state = {**current_state, "human_feedback": "你好"}
# # 继续执行图
# final_result = graph.invoke(updated_state)
# print(final_result)
# # 清理
# del RUNNING_GRAPHS[session_id]
# print(graph)

@app.post("/start")
def start_process(user_input: str):
    """Kick off a review session: run the graph until it pauses for human feedback.

    Stores the (graph, paused_state) pair under a fresh session id so that
    /feedback can later resume it.
    """
    session_id = str(uuid.uuid4())
    graph = create_graph()
    initial_state = {
        "input": user_input,
        "ai_response": "",
        "human_feedback": None,
        "approved": False,
    }
    try:
        # Runs up to the first pause point (the await_feedback node); with
        # human_feedback still None the conditional edge stops the run there.
        paused_state = graph.invoke(initial_state)
        RUNNING_GRAPHS[session_id] = (graph, paused_state)
        return {
            "session_id": session_id,
            "ai_response": paused_state["ai_response"],
            "status": "awaiting_human_feedback",
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@app.post("/feedback")
def submit_feedback(feedback_req: HumanFeedbackRequest):
    """Resume a paused session with the reviewer's feedback and return the final state."""
    session_id = feedback_req.session_id
    entry = RUNNING_GRAPHS.get(session_id)
    if entry is None:
        raise HTTPException(status_code=404, detail="Session not found or already completed")

    graph, paused_state = entry

    # Inject the human feedback into the stored state ...
    resumed_state = {**paused_state, "human_feedback": feedback_req.feedback}
    # ... and run the graph again to completion.
    final_result = graph.invoke(resumed_state)

    # The session is finished: drop it from the in-memory store.
    del RUNNING_GRAPHS[session_id]

    return {"session_id": session_id, "final_result": final_result}

if __name__ == "__main__":
    # Guarded entry point: previously uvicorn.run executed at import time,
    # which blocked any importer of this module (tests, other servers).
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8080)