import io
from typing import TypedDict

from PIL import Image as PILImage

from langchain_openai import ChatOpenAI
from langchain_core.messages import AIMessage,SystemMessage,HumanMessage

from langgraph.graph import StateGraph,START,END
from langgraph.checkpoint.memory import MemorySaver
from langgraph.prebuilt import tools_condition

# NOTE(review): hardcoded API credential — this key is exposed in source control.
# Move it to an environment variable (e.g. os.environ["OPENAI_API_KEY"]) before sharing.
api_key = "sk-6S0PtpNia71gjcfwSsDPsJ9mGqsVPr2XRQzAx1dHbJS7RW4t"
# OpenAI-compatible proxy endpoint used instead of the official API base.
api_base="https://chatapi.littlewheat.com/v1"

# Shared chat-model client used by every node in the graph below.
llm = ChatOpenAI(model="gpt-4o",api_key=api_key,base_url=api_base)

# State schema shared by all graph nodes.
class State(TypedDict):
    # Raw text entered by the user for the current turn.
    user_input:str
    # The model's reply. NOTE(review): despite the str annotation, at runtime
    # this holds a message object (llm.invoke result / AIMessage — see
    # call_model and execute_users); confirm before tightening the type.
    model_response:str
    # Approval marker: either "直接执行!" (auto-approved), the human-review
    # prompt produced by call_model, or the reviewer's 是/否 answer.
    user_approval:str

# Node that sends the user's request to the LLM, guarding high-risk requests.
def call_model(state: "State"):
    """Run the user's request through the model unless it looks destructive.

    If the input mentions deletion ('删除'), the model is NOT called; instead
    ``user_approval`` is set to a confirmation prompt so the run pauses at the
    breakpoint before ``execute_users`` and a human can approve the action.
    Otherwise the model is invoked and the request is marked auto-approved.

    Args:
        state: current graph state; reads ``user_input``.

    Returns:
        The (mutated) state with ``user_approval`` — and, on the normal path,
        ``model_response`` — filled in.
    """
    if '删除' in state["user_input"]:
        # High-risk request: defer to human review instead of calling the model.
        state["user_approval"] = f"用户输入的指令是:{state['user_input']},请人工确认是否执行!"
        print(state)
    else:
        response = llm.invoke(state["user_input"])
        state["user_approval"] = "直接执行!"
        state["model_response"] = response
    return state

# Logic executed by the node guarded by the human-in-the-loop breakpoint.
def execute_users(state: "State"):
    """Apply the human reviewer's decision recorded in ``user_approval``.

    "是" (yes) -> replace ``model_response`` with a success notice.
    "否" (no)  -> replace ``model_response`` with a rejection notice.
    Anything else (e.g. the auto-approved path) -> pass the state through.
    """
    print('execute_users')
    if state["user_approval"] == "是":
        response = "您的删除请求已经获得管理员的批准并成功执行。如果您有其他问题或需要进一步的帮助，请随时联系我们。"
        return {"model_response": AIMessage(response)}
    if state["user_approval"] == "否":
        # fix: dropped the stray trailing semicolon the original had here
        response = "对不起，您当前的请求是高风险操作，管理员不允许执行！"
        return {"model_response": AIMessage(response)}
    return state

# Translation node: rewrite the stored model response in English.
def translate_message(state:State):
    """Ask the LLM to translate ``model_response`` into English.

    Returns a partial state update carrying the translated message.
    """
    prompt = """
        Please translate the received text in any language into English as output
        """
    reply = state["model_response"]
    conversation = [
        SystemMessage(content=prompt),
        HumanMessage(content=reply.content),
    ]
    translated = llm.invoke(conversation)
    return {"model_response": translated}

# Build the state graph.
builder = StateGraph(State)

# Register the nodes.
builder.add_node("call_model", call_model)
builder.add_node("execute_users", execute_users)
builder.add_node("translate_message", translate_message)

# Wire the edges: START -> call_model -> execute_users -> translate_message -> END.
builder.add_edge(START, "call_model")
builder.add_edge("call_model", "execute_users")
builder.add_edge("execute_users", "translate_message")
builder.add_edge("translate_message", END)

# Checkpointer: in-memory short-term storage so a paused run can be resumed.
memory = MemorySaver()

# Compile with a breakpoint: interrupt_before pauses the graph right before
# the "execute_users" node so a human can review the pending action.
graph = builder.compile(checkpointer=memory, interrupt_before=["execute_users"])

def display(image_data):
    """Open raw image bytes (e.g. a rendered PNG) in the system image viewer."""
    buffer = io.BytesIO(image_data)
    PILImage.open(buffer).show()

# display(graph.get_graph().draw_mermaid_png())



# Create a conversation thread: the thread_id identifies this run's state
# in the checkpointer so interrupted runs can be resumed.
config = {"configurable":{"thread_id":"11232"}}

# Interactive human-in-the-loop driver for the compiled graph.
def run_dialogue(graph1, config1, all_chunks=None):
    """Read user input in a loop, run the graph, and handle approval pauses.

    Each turn streams the graph until it reaches the breakpoint (or END).
    If the last streamed state carries the human-review prompt, the operator
    is asked for 是/否 and the answer is written back into the checkpointed
    state; the run is then resumed and the final response printed.

    Args:
        graph1: compiled LangGraph with an interrupt before "execute_users".
        config1: run config carrying the thread_id for the checkpointer.
        all_chunks: optional accumulator for every streamed state. Defaults
            to a fresh list per call — the original ``all_chunks=[]`` was a
            mutable default shared across calls.
    """
    if all_chunks is None:
        all_chunks = []
    while True:
        user_input = input("请输入您的消息（输入'退出'结束对话）：")
        if user_input.lower() == "退出":
            break

        # Run the graph until it hits the breakpoint node (or finishes).
        for chunk in graph1.stream({"user_input": user_input}, config1, stream_mode="values"):
            all_chunks.append(chunk)

        # If the last state asks for approval, collect the human decision and
        # patch it into the graph's checkpointed state.
        last_chunk = all_chunks[-1]
        if last_chunk["user_approval"] == f"用户输入的指令是:{last_chunk['user_input']},请人工确认是否执行!":
            user_approval = input(f"当前用户的输入是：{last_chunk['user_input']}, 请人工确认是否执行！请回复 是/否。")
            graph1.update_state(config1, {"user_approval": user_approval})

        # Resume from the breakpoint (input=None continues the paused run).
        for chunk in graph1.stream(None, config1, stream_mode="values"):
            all_chunks.append(chunk)

        # fix: the key must use single quotes — nested double quotes inside an
        # f-string are a SyntaxError on Python < 3.12.
        print(f"人工智能助理：{all_chunks[-1]['model_response']}")

# Start the interactive dialogue loop with the compiled graph and thread config.
run_dialogue(graph,config)

# for chunk in graph.stream(input={"user_input": "请介绍下自己"}, config=config, stream_mode="values"):
#      print(chunk)

# for chunk in graph.stream(input={"user_input": "我将在数据库中删除 id 为 xigualaoshi 的所有信息"}, config=config, stream_mode="values"):
#      print(chunk)


# call_model({"user_input": "请介绍下自己"})