# import os
# import json
# import asyncio

# from typing import Annotated, Literal
# from typing_extensions import TypedDict
# from pydantic import BaseModel, Field

# from conf.config import settings
# from common.logger import logger
# # from common import postgres_manager, get_pg_pool
# # from common import redis_manager, get_redis_client, get_redis_pool

# # from langchain.chat_models import ChatOpenAI
# # from langchain_community.chat_models import ChatOpenAI
# from langchain_openai import ChatOpenAI
# from langchain.schema import HumanMessage, AIMessage

# from langgraph.graph import StateGraph, START, END
# from langgraph.graph.message import add_messages
# from langgraph.prebuilt import ToolNode, tools_condition
# from langgraph.checkpoint.memory import MemorySaver
# from langgraph.checkpoint.postgres.aio import AsyncPostgresSaver
# from langgraph.types import interrupt, Command

# from agents.mcps.oa.get_psndoc import get_personal_data
# from agents.mcps.oa.get_cal_has_sumhour import get_duration_vacation
# from agents.mcps.oa.get_kq import get_dong_yang_kq
# from agents.mcps.oa.post_qianka import qianka_get_kq, qianka_post
# from agents.memory.pgdb_pool import init_postgres_pool
# from agents.scipt.join_states import join_state
# from agents.prompt.prompts import HOLIDAYS_DURATION_PROMPT, PERSONAL_DATA_PROMPT, QIANKA_PROMPT, COMMON_CHATBOT_PROMPT


# async def langgraph_mcp_agent(checkpointer: AsyncPostgresSaver):

#     logger.info("调用 langgraph_mcp_agent 函数")
#     local_qwen3 = ChatOpenAI(
#         model=settings.QWEN_MODEL,
#         api_key=settings.QWEN_API_KEY,
#         base_url=settings.QWEN_BASE_URL,
#         streaming=True
#     )

#     # class MessageClassifier(BaseModel):
#     #     message_type: Literal["qiandan", "sql_query", "personal_data"]=Field(
#     #         ...,
#     #         description="将用户消息分类为以下三种之一：签单(qiandan)，sql查询(sql_query)，个人信息(personal_data)"
#     #     )

#     class State(TypedDict):
#         messages: Annotated[list, add_messages]

#     graph_builder = StateGraph(State)

#     tools = [get_personal_data, get_duration_vacation,
#              get_dong_yang_kq, qianka_get_kq]
#     local_qwen3_with_tools = local_qwen3.bind_tools(tools)

#     async def chatbot(state: State):
#         # print(state["messages"])
#         invoke_messages = [{"role": "system", "content": COMMON_CHATBOT_PROMPT}]
#         for i in state["messages"]:
#             if isinstance(i, HumanMessage):
#                 invoke_messages.append({"role": "user", "content": i.content})
#             elif isinstance(i, AIMessage):
#                 invoke_messages.append(
#                     {"role": "assistant", "content": i.content})
#
#         response = await local_qwen3_with_tools.ainvoke(invoke_messages)
#         return {"messages": [response]}

#     async def search_holidays_duration_chatbot(state: State):
#         last_message = state["messages"][-1]
#         all_chunks = []
#         logger.info(f"调用年假函数 search_holidays_duration_chatbot {last_message}")
#         async for chunk in local_qwen3_with_tools.astream([
#             {
#                 "role": "system",
#                 "content": HOLIDAYS_DURATION_PROMPT
#             },
#             {"role": "user", "content": last_message.content}
#         ]):
#             all_chunks.append(chunk.content)
#             # print('chunk:', chunk)
#             aim_message = join_state(chunk, all_chunks)
#             if aim_message != '':
#                 state['messages'].append(aim_message)
#             yield {"messages": [chunk]}

#     async def person_data_chatbot(state: State):
#         last_message = state["messages"][-1]
#         all_chunks = []
#         logger.info(f"调用个人信息函数 person_data_chatbot {last_message}")
#         # print("person_data_chatbot", last_message)
#         async for chunk in local_qwen3_with_tools.astream([
#             {
#                 "role": "system",
#                 "content": PERSONAL_DATA_PROMPT
#             },
#             {"role": "user", "content": last_message.content}
#         ]):
#             all_chunks.append(chunk.content)
#             # print('chunk:', chunk)
#             aim_message = join_state(chunk, all_chunks)
#             if aim_message != '':
#                 state['messages'].append(aim_message)

#             # 检查response_metadata是否存在并获取finish_reason
#             yield {"messages": [chunk]}

#     async def qianka_chatbot(state: State):
#         last_message = state["messages"][-1]
#         user_id = json.loads(last_message.content)[0].get('empNO')
#         emp_dates = [item.get('empDate')
#                      for item in json.loads(last_message.content)]
#         attends = [item.get('attends')
#                    for item in json.loads(last_message.content)]
#         qianka_args = {
#             "user_id": user_id,
#             "emp_dates": emp_dates,
#             "attends": attends
#         }
#         qianka_key = f'qianka_{user_id}'
#         qianka_args = json.dumps(qianka_args)
#         redis = await get_redis_client()
#         await redis.set(qianka_key, qianka_args)

#         all_chunks = []
#         logger.info(f"调用签卡函数 qianka_chatbot {last_message}")
#         async for chunk in local_qwen3_with_tools.astream([
#             {
#                 "role": "system",
#                 "content": QIANKA_PROMPT
#             },
#             {"role": "user", "content": last_message.content}
#         ]):
#             all_chunks.append(chunk.content)
#             # print('chunk:', chunk)
#             aim_message = join_state(chunk, all_chunks)
#             if aim_message != '':
#                 state['messages'].append(aim_message)

#             # 检查response_metadata是否存在并获取finish_reason
#             yield {"messages": [chunk]}

#     async def qianka_interrupt(state: State) -> Command[Literal["approved_path", "rejected_path"]]:
#         # NOTE(review): langgraph's interrupt() pauses the graph by raising a
#         # special control-flow exception inside the node's own task; wrapping it
#         # in asyncio.to_thread likely breaks pause/resume semantics — if this
#         # code is revived, call interrupt(...) directly and verify against the
#         # langgraph human-in-the-loop docs.
#         is_approved = await asyncio.to_thread(interrupt, "确认签卡吗？(yes or no):")
#         if is_approved in ["approve", "yes", "是", "y", "Y", "确认", "同意"]:
#             return Command(goto="approved_path")
#         else:
#             return Command(goto="rejected_path")

#     async def classify_tool(state: State) -> Literal["chatbot", "search_holidays_duration_chatbot", "person_data_chatbot", "qianka_chatbot"]:
#         last_tool_name = state["messages"][-1].name
#         if last_tool_name == "get_duration_vacation":
#             return "search_holidays_duration_chatbot"

#         elif last_tool_name == "get_personal_data":
#             return "person_data_chatbot"

#         elif last_tool_name == "qianka_get_kq":
#             return "qianka_chatbot"

#         else:
#             return "chatbot"

#     async def approved_node(state: State) -> State:
#         # NOTE(review): qianka_key, emp_dates and attends are locals of
#         # qianka_chatbot and are NOT defined in this scope — as written this
#         # would raise NameError if uncommented. The payload was stored in redis
#         # under f'qianka_{user_id}'; read user_id/emp_dates/attends back from
#         # that JSON blob instead of referencing the other function's locals.
#         redis = await get_redis_client()
#         user_id = json.loads(await redis.get(qianka_key))
#
#         print("user_id:", user_id)
#         print("emp_dates:", emp_dates)
#         print("attends:", attends)
#         # del emp_dates, attends, user_id
#         # qianka_post(user_id= user_id, emp_dates=emp_dates, attends=attends)
#         return {"messages": ['✅ Approved path taken.']}

#     # Alternative path after rejection
#     async def rejected_node(state: State) -> State:
#         print("❌ Rejected path taken.")
#         return {"messages": ['❌ Rejected path taken.']}

#     async def end_note(state: State):
#         logger.info(f"end_note节点输出：{state}")
#         # print("===========end_node:===========", "\n", state, "\n")

#     tool_node = ToolNode(tools=tools)

#     graph_builder.add_node("chatbot", chatbot)
#     graph_builder.add_node("tools", tool_node)
#     graph_builder.add_node("search_holidays_duration_chatbot",
#                            search_holidays_duration_chatbot)
#     graph_builder.add_node("person_data_chatbot", person_data_chatbot)
#     graph_builder.add_node("qianka_chatbot", qianka_chatbot)
#     graph_builder.add_node("qianka_interrupt", qianka_interrupt)
#     graph_builder.add_node("approved_path", approved_node)
#     graph_builder.add_node("rejected_path", rejected_node)
#     graph_builder.add_node("end_note", end_note)

#     graph_builder.add_edge(START, "chatbot")
#     graph_builder.add_conditional_edges(
#         "chatbot",
#         tools_condition,
#     )  # 条件边，当 "chatbot" 节点执行完成后，判断是否调用工具
#     graph_builder.add_conditional_edges(
#         "tools",
#         classify_tool,
#     )
#     graph_builder.add_edge("qianka_chatbot", "qianka_interrupt")
#     graph_builder.add_edge('search_holidays_duration_chatbot', "end_note")
#     graph_builder.add_edge('person_data_chatbot', "end_note")
#     graph_builder.add_edge("chatbot", "end_note")
#     graph_builder.add_edge("approved_path", "end_note")
#     graph_builder.add_edge("rejected_path", "end_note")
#     graph_builder.add_edge("end_note", END)

#     # memory = MemorySaver()
#     # graph = graph_builder.compile(checkpointer=memory)

#     # config = {'configurable': {'thread_id': '136543725262656'}}

#     try:
#         # await postgres_manager.initialize()
#         # pool = await get_pg_pool()
#         # # 使用异步上下文管理器获取连接
#         # checkpointer = AsyncPostgresSaver(pool)
#         # await checkpointer.setup()
#         graph = graph_builder.compile(checkpointer=checkpointer)
#
#         # memory = await checkpointer.aget_tuple(config)
#         # print(memory)
#         # with open('./dyg_oa/main.png', "wb") as f:
#         #     f.write(graph.get_graph().draw_png())
#         # print(f"Graph imagemage 已经保存到 main.png！")
#         logger.info("graph构建成功")
#     except Exception as e:
#         # NOTE(review): if compile() fails, `graph` is never bound, so the
#         # `return graph` below raises UnboundLocalError; the error is also only
#         # printed, not logged. If revived, log via logger.exception and re-raise
#         # (or return None explicitly) instead of falling through.
#         print(f"graph构建失败, {e}")
#     return graph


# # if __name__ == "__main__":
# #     graph = langgraph_mcp_agent()

# #     def stream_graph_updates(user_input: str):
# #         for event in graph.stream(
# #             {"messages": [{"role": "user", "content": user_input}]},
# #             stream_mode="values"
# #         ):
# #             latest_message = event["messages"][-1]
# #             if latest_message.type == "ai":  # 只输出 AI 消息
# #                 print(latest_message.content)

# #     while True:
# #         user_input = input("User: ")
# #         if user_input.lower() in ["quit", "exit", "q"]:
# #             print("Goodbye!")
# #             break
# #         stream_graph_updates(user_input)
