import asyncio
from datetime import datetime
from typing import Dict, Optional, Any, Literal
import json
from langfuse.langchain import CallbackHandler
from asgiref.sync import sync_to_async
from copilotkit.langgraph import copilotkit_emit_state, copilotkit_emit_tool_call, copilotkit_emit_message
from langchain_core.messages import ToolMessage, BaseMessage, convert_to_openai_messages
from langchain_community.embeddings import DashScopeEmbeddings

from langchain_core.runnables import RunnableConfig
from langchain_openai import ChatOpenAI
from langgraph.checkpoint.postgres.aio import AsyncPostgresSaver
from langgraph.constants import END
from langgraph.graph.state import CompiledStateGraph, StateGraph
from langgraph.types import interrupt, StateSnapshot
from openai import OpenAIError
from psycopg_pool import AsyncConnectionPool
from app.core.longmemory.producer import RocketMQManager
from app.core.config import settings, Environment
from app.core.langgraph.tools import sampletools
from app.core.logging import logger
from app.core.metrics import llm_inference_duration_seconds
from app.core.prompts import SYSTEM_PROMPT
from app.core.vector_store.vector_store_manager import KnowledgeBaseManager
from app.schemas import Message
from app.schemas.graph import SampleGraphState
from app.schemas.interrupt_approval_factory import create_name_approval_request, create_tool_approval_request, \
    create_tool_result_approval_request
from app.utils import prepare_messages, dump_messages
from app.utils.graph import prepare_messages_no_trim



class SampleAgent:
    def __init__(self):
        """Initialize the agent: LLM client, tool registry, RocketMQ producer.

        The PostgreSQL connection pool and the compiled LangGraph workflow are
        created lazily (see ``_get_connection_pool`` / ``create_graph``), so
        construction stays cheap and I/O-free apart from the MQ producer.
        """
        self.llm = ChatOpenAI(
            base_url=settings.LLM_BASE_URL,
            model=settings.LLM_MODEL,
            temperature=settings.DEFAULT_LLM_TEMPERATURE,
            # SECURITY: the key must only ever come from settings — a previous
            # revision leaked a literal API key in a comment here.
            api_key=settings.LLM_API_KEY,
            max_tokens=settings.MAX_TOKENS,
            **self._get_model_kwargs(),  # environment-specific sampling params (dev vs prod)
        ).bind_tools(sampletools)
        # Map tool name -> tool object for O(1) lookup during tool execution.
        self.tools_by_name = {tool.name: tool for tool in sampletools}
        # Async PostgreSQL connection pool (psycopg_pool.AsyncConnectionPool).
        # Created lazily and reused across requests; it backs the LangGraph
        # checkpointer so graph state can be persisted asynchronously.
        self._connection_pool: Optional[AsyncConnectionPool] = None
        self.rocketmq_manager = self._init_rocketmq_manager()
        # Compiled LangGraph workflow, built on first call to create_graph().
        self._graph: Optional[CompiledStateGraph] = None
        logger.info("llm初始化", model=settings.LLM_MODEL, environment=settings.ENVIRONMENT.value)

    def _init_rocketmq_manager(self):
        """Create the RocketMQ manager, returning None when setup fails.

        A None manager downgrades long-term memory to a no-op rather than
        breaking agent construction.
        """
        try:
            logger.info("RocketMQ 管理器初始化")
            manager = RocketMQManager()
        except Exception as exc:
            logger.error("RocketMQ 管理器初始化失败", error=str(exc))
            return None
        return manager
    def _get_model_kwargs(self) -> Dict[str, Any]:
        """Return environment-specific sampling parameters for the LLM.

        Returns:
            Dict[str, Any]: extra keyword arguments for the model, selected by
            the current runtime environment; empty for unknown environments.
        """
        # Development uses tighter nucleus sampling; production widens it and
        # adds mild repetition penalties.
        per_environment = {
            Environment.DEVELOPMENT: {"top_p": 0.8},
            Environment.PRODUCTION: {
                "top_p": 0.95,
                "presence_penalty": 0.1,
                "frequency_penalty": 0.1,
            },
        }
        return dict(per_environment.get(settings.ENVIRONMENT, {}))

    async def _get_connection_pool(self) -> AsyncConnectionPool:
        """Create (once) and return the async PostgreSQL connection pool.

        Lazy singleton: the pool is built on first use and cached on the
        instance. In production a creation failure degrades gracefully by
        returning None; in any other environment the exception is re-raised.

        Returns:
            AsyncConnectionPool: the shared pool instance, or None when
            creation failed in production.
        """
        # Singleton: only build the pool once.
        if self._connection_pool is None:

            try:
                # Pool size comes from environment configuration.
                max_size = settings.POSTGRES_POOL_SIZE
                # NOTE(review): this logs the full POSTGRES_URL, which may
                # contain credentials — consider redacting before logging.
                logger.info("数据库连接池创建",url=settings.POSTGRES_URL,max_size=max_size, environment=settings.ENVIRONMENT.value)

                self._connection_pool = AsyncConnectionPool(
                    settings.POSTGRES_URL,  # database connection URL from settings
                    open=False,  # don't open yet; opened explicitly below
                    max_size=max_size,  # upper bound on pooled connections
                    kwargs={  # per-connection parameters
                        "autocommit": True,  # commit each statement automatically
                        "connect_timeout": 5,  # connection timeout (seconds)
                        "prepare_threshold": None,  # disable server-side prepared statements
                    },
                )

                # Open the pool asynchronously.
                await self._connection_pool.open()
                # Record successful pool creation.
                logger.info("数据库连接池创建成功", max_size=max_size, environment=settings.ENVIRONMENT.value)
            except Exception as e:
                # Record the failure before deciding how to proceed.
                logger.error("数据库连接池创建失败", error=str(e), environment=settings.ENVIRONMENT.value)
                # In production, degrade gracefully instead of crashing.
                if settings.ENVIRONMENT == Environment.PRODUCTION:
                    # Warn that we continue without a pool (and so without checkpoints).
                    logger.warning("continuing_without_connection_pool", environment=settings.ENVIRONMENT.value)
                    # Return None instead of raising.
                    return None
                # Outside production, surface the failure to the caller.
                raise e
        # Return the cached pool instance.
        return self._connection_pool
    async def _getname(self, state: SampleGraphState, config: RunnableConfig):
        """Ensure the agent has a name, interrupting to ask the user if not.

        Also demonstrates two config mechanics: writing a value into
        ``config["configurable"]`` mid-run, and reading a value injected by
        the frontend's useCoAgent component.
        """
        logger.info("进入getname节点")
        # Demonstrate mutating the configurable mapping from inside a node.
        config["configurable"]["new_key"] = "new_value"
        logger.info("设置configurable中的值测试")

        if not state.get("agent_name"):
            # Pause the graph and wait for the frontend to supply a name.
            name_request = create_name_approval_request("给我取个名字吧")
            state["agent_name"] = interrupt(name_request)

        # Read the token the frontend passed through useCoAgent's config.
        authTokenFrom_useCoAgent = config["configurable"].get("authTokenFrom_useCoAgent", "没有authTokenFrom_useCoAgent")
        logger.info(f"authTokenFrom_useCoAgent:{authTokenFrom_useCoAgent}")
        return state

    async def _long_term_memory(self, state: SampleGraphState, config: RunnableConfig) -> dict:
        """Forward the latest user message to RocketMQ for long-term memory.

        Args:
            state: current conversation state.
            config: runnable config; its frontend auth token is used as the
                message tag.

        Returns:
            dict: an empty update — this node never changes the graph state.
        """
        try:
            messages = state["messages"]
            last_message = messages[-1] if messages else None
            authTokenFrom_useCoAgent = config["configurable"].get(
                "authTokenFrom_useCoAgent", "defaultTokenFrom_userCoAgent"
            )

            # Guard chain replaces the original nested ifs; log strings are
            # preserved verbatim.
            if not (last_message and hasattr(last_message, "content") and self.rocketmq_manager):
                logger.info("没有用户消息需要发送到长期记忆或 RocketMQ 管理器未初始化")
            elif not self.rocketmq_manager.start_producer():
                logger.error("无法启动 RocketMQ 生产者")
            elif self.rocketmq_manager.send_message(
                last_message.content,
                tags=authTokenFrom_useCoAgent,
                topic="userMessages",
            ):
                logger.info("用户消息已发送到长期记忆队列", session_id=state.get("session_id", ""))
            else:
                logger.error("发送消息到长期记忆队列失败", session_id=state.get("session_id", ""))

        except Exception as exc:
            # Long-term memory is best-effort; never break the main workflow.
            logger.error("处理长期记忆时出错", error=str(exc))

        # Side-effect-only node: return an empty update.
        return {}

    async def _rag(self, state: SampleGraphState,config: RunnableConfig) -> dict:
        """Retrieve knowledge-base documents relevant to the latest user message.

        Emits search-progress updates to the frontend via CopilotKit while the
        retrieval runs. Skips silently when no knowledge index is configured.

        Returns:
            dict: ``{"knowledge": <joined document text>}`` when documents were
            found, otherwise an empty update.
        """
        # No knowledge base bound to this session -> nothing to retrieve.
        if not state.get("knowledge_index"):
            logger.info("没有提供知识库索引，跳过检索")
            return {}
        # Push a "searching" progress entry to the frontend.
        state["searches"]=[]
        state["searches"].append({"query": f"在知识库中检索...", "done": False})
        await copilotkit_emit_state(config, state)
        await asyncio.sleep(0.5)
        try:
            # Build a retriever over the session's knowledge index (top-4 docs).
            knowledge_manager = KnowledgeBaseManager(index_name=state["knowledge_index"],k=4)

            # The newest message is used as the retrieval query.
            last_message = state["messages"][-1] if state["messages"] else None
            if not last_message or not hasattr(last_message, 'content'):
                logger.info("没有找到有效的用户消息用于检索")
                # NOTE(review): this early return leaves the search entry with
                # done=False on the frontend — consider marking it done first.
                return {}

            query = last_message.content
            logger.info(f"使用查询在知识库中检索: {query}")



            # Run retrieval; returns a list of relevant documents.
            docs = await knowledge_manager.retriever.aget_relevant_documents(query)
            # Mark the search as finished and push the update.
            state["searches"][-1]["done"] = True
            await copilotkit_emit_state(config, state)
            await asyncio.sleep(0.5)

            # Join the retrieved documents into a single knowledge string.
            if docs:
                retrieved_content = "\n\n".join([doc.page_content for doc in docs])
                logger.info(f"检索到 {len(docs)} 个相关文档")
                return {"knowledge": retrieved_content}
            else:
                logger.info("未在知识库中找到相关文档")
                return {}

        except Exception as e:
            logger.error(f"知识库检索过程中出错: {str(e)}")
            # On failure, still mark the search done so the UI doesn't hang.
            if state["searches"]:
                state["searches"][-1]["done"] = True
                await copilotkit_emit_state(config, state)
            return {}

    async def _cleanup_state(self, state: SampleGraphState) -> SampleGraphState:
        """Reset transient fields before the workflow ends.

        Args:
            state: the current agent state.

        Returns:
            The state with retrieved ``knowledge`` cleared and the
            ``searches`` progress list emptied.
        """
        logger.info("清理agent状态")
        # knowledge_index is intentionally left untouched so the session stays
        # bound to its knowledge base; only retrieved content is dropped.
        state.update(knowledge=None, searches=[])
        return state
    async def _chat(self, state: SampleGraphState,config: RunnableConfig) -> dict:
        """Generate the AI response for the current conversation state.

        Builds the system prompt (language directive plus any retrieved
        knowledge), streams progress/intermediate messages to the frontend,
        and calls the LLM with retries and a production fallback model.

        Args:
            state: current graph state (messages, language, knowledge, ...).
            config: runnable config; supplies the thread_id used as session_id.

        Returns:
            dict: update containing the newly generated message.

        Raises:
            Exception: when every LLM attempt fails.
        """
        # NOTE(review): logging the full state/config may expose user content
        # and frontend tokens in logs — confirm this is acceptable.
        logger.info(f"chat节点入参state:{state}")
        logger.info(f"chat节点入参config:{config}")
        thread_id=config["configurable"]["thread_id"]
        # Cosmetic progress entries pushed to the frontend one by one.
        state["searches"] = [
            {"query": "初始化", "done": False},
            {"query": "检索资料...", "done": False},
            {"query": "生成答案...", "done": False},
        ]
        await copilotkit_emit_state(config, state)

        for search in state["searches"]:
            await asyncio.sleep(0.5)
            search["done"] = True
            await copilotkit_emit_state(config, state)
        logger.info("向前端推送agent状态")
        # Push an intermediate "please wait" message to the frontend.
        intermediate_message = "我正在处理，请耐心等待一下哦..."
        await copilotkit_emit_message(config, intermediate_message)

        # The LangGraph thread_id doubles as the session identifier.
        state["session_id"]=thread_id
        language=state.get("language","中文")
        logger.info(f"chat从config的configurable中取出thread_id:{thread_id}")
        # Compose the system prompt: language directive, plus retrieved
        # knowledge when the rag node produced any.
        if not state.get("knowledge"):
            SYSTEM_PROMPT_NEW = SYSTEM_PROMPT + f"\n请用{language}回答！不允许用其他语言！"
        else:
            SYSTEM_PROMPT_NEW = SYSTEM_PROMPT + f"\n请用{language}回答！不允许用其他语言！\n以下是知识库内容：\n{state['knowledge']}"
        # No-trim variant: the deployed model doesn't support token counting.
        messages = prepare_messages_no_trim(state["messages"], SYSTEM_PROMPT_NEW)
        logger.info(f"chat节点处理后发给大模型的的messages:{messages}")
        # Count of failed LLM calls (used only for logging).
        llm_calls_num = 0

        # Retry budget from configuration.
        max_retries = settings.MAX_LLM_CALL_RETRIES
        for attempt in range(max_retries):
            try:
                # Measure inference latency for Prometheus.
                with llm_inference_duration_seconds.labels(model=self.llm.model_name).time():
                    # `attempt` is 0-based, so the first try logs as 第0次.
                    logger.info(f"第{attempt}次尝试调用llm生成响应...")
                    generated_state = {"messages": [await self.llm.ainvoke(dump_messages(messages))]}
                # Log the successful generation.
                logger.info(
                    "llm_response_generated",
                    session_id=state["session_id"],
                    llm_calls_num=llm_calls_num + 1,
                    model=settings.LLM_MODEL,
                    environment=settings.ENVIRONMENT.value,
                )
                # Return the state update containing the new message.
                return generated_state
            except OpenAIError as e:
                # Log the failed attempt before retrying.
                logger.error(
                    "llm_call_failed",
                    llm_calls_num=llm_calls_num,
                    attempt=attempt + 1,
                    max_retries=max_retries,
                    error=str(e),
                    environment=settings.ENVIRONMENT.value,
                )
                llm_calls_num += 1

                # In production, switch to a fallback model on the
                # second-to-last attempt. NOTE(review): only model_name is
                # swapped — base_url/key stay as configured; confirm the
                # fallback is reachable through the same endpoint.
                if settings.ENVIRONMENT == Environment.PRODUCTION and attempt == max_retries - 2:
                    fallback_model = "deepseek-reasoner"
                    logger.warning(
                        "using_fallback_model", model=fallback_model, environment=settings.ENVIRONMENT.value
                    )
                    # Point subsequent calls at the fallback model.
                    self.llm.model_name = fallback_model
                # Try again.
                continue
        # All retries exhausted.
        raise Exception(f"Failed to get a response from the LLM after {max_retries} attempts")

    async def _tool_call(self, state: SampleGraphState, config) -> SampleGraphState:
        """Execute the tool calls from the last AI message with human-in-the-loop
        approval before and after each call.

        Per tool call:
            1. interrupt() with an approval request and wait for the frontend.
            2. If approved, run the tool, then interrupt() again so the user
               can review (and possibly edit) the tool result.
            3. Wrap the reviewed result in a ToolMessage and append it to the
               conversation. Rejected calls get a "No result！" ToolMessage so
               the pending tool_call is still answered.

        Args:
            state: current agent state containing the message with tool_calls.
            config: runnable config used for CopilotKit state/tool-call emission.

        Returns:
            The updated state including the tool response messages.
        """
        # The last message carries the tool_calls emitted by the LLM.
        last_message = state["messages"][-1]
        state["searches"] = []

        # No tool calls -> nothing to execute.
        if not hasattr(last_message, 'tool_calls') or not last_message.tool_calls:
            logger.info(f"最后一条消息不含工具调用:{last_message}")
            return state

        outputs = []
        for tool_call in last_message.tool_calls:
            # 1) Ask the frontend to approve this tool invocation.
            approval_request = create_tool_approval_request(
                tool_call.get("name"),
                tool_call.get("args", {}),
                tool_call.get("id")
            )
            approve_result = interrupt(approval_request)
            logger.info(f"approve_result={approve_result}")
            if approve_result == "approved":
                logger.info(f"工具调用通过:{tool_call}")
                # CAUTION: when interrupt() resumes, this node re-runs from the
                # top, so the tool may be invoked again (side effect!). The
                # user-reviewed result below is what actually gets recorded.
                toolname = tool_call.get("name")
                state["searches"].append({"query": f"调用{toolname}工具中", "done": False})
                await copilotkit_emit_state(config, state)
                await asyncio.sleep(0.5)

                # Look the tool up by name and invoke it with the LLM's args.
                tool_result = await self.tools_by_name[tool_call["name"]].ainvoke(tool_call["args"])
                logger.info(f"tool_result:{tool_result}")
                # 2) Let the user review (and possibly edit) the tool result.
                tool_result_approval_request = create_tool_result_approval_request(
                    tool_name=tool_call["name"],
                    tool_args=tool_call["args"],
                    tool_result=tool_result,
                    tool_id=tool_call["id"]
                )
                tool_result_approval_result = interrupt(tool_result_approval_request)
                logger.info(f"工具调用结果审核状态: {tool_result_approval_result}")
                # Mark this search entry done and push the update.
                state["searches"][-1]["done"] = True
                await copilotkit_emit_state(config, state)

                # Weather results are additionally rendered as a frontend card.
                if tool_call["name"] == "get_weather":
                    try:
                        weather_data = json.loads(tool_result_approval_result)
                        await copilotkit_emit_tool_call(config, name="get_weather_card", args=weather_data)
                    except json.JSONDecodeError:
                        logger.error("Failed to parse weather data JSON")

                # 3) Record the reviewed result as a ToolMessage.
                tool_message = ToolMessage(
                    content=tool_result_approval_result,
                    name=tool_call["name"],
                    tool_call_id=tool_call["id"],
                )
                outputs.append(tool_message)
                # BUGFIX: append only the new message. The previous code added
                # the whole accumulated `outputs` list on every iteration,
                # duplicating earlier tool messages when several tools ran.
                state["messages"] = state["messages"] + [tool_message]
            if approve_result == "rejected":
                logger.info("工具调用被拒绝")
                # Answer the pending tool_call with a placeholder result so the
                # conversation stays well-formed for the next LLM turn.
                tool_message = ToolMessage(
                    content="No result！",
                    name=tool_call["name"],
                    tool_call_id=tool_call["id"],
                )
                outputs.append(tool_message)
                state["messages"] = state["messages"] + [tool_message]

        # Clear the progress list before handing state back to the graph.
        state["searches"] = []

        return state
    def _should_continue(self, state: SampleGraphState) -> Literal["end", "continue"]:
        """Route after chat: run tools when the last message requests them.

        Args:
            state: current graph state whose last message is inspected.

        Returns:
            Literal["end", "continue"]: "end" when the last message carries no
            tool calls, "continue" otherwise.
        """
        last_message = state["messages"][-1]
        # Any pending tool call keeps the workflow going; otherwise finish.
        return "continue" if last_message.tool_calls else "end"

    async def shutdown(self):
        """Release agent resources (RocketMQ producer, DB connection pool)."""
        # Close the RocketMQ connection.
        if self.rocketmq_manager:
            # NOTE(review): the actual producer shutdown is commented out, so
            # the log below claims a close that never happens — confirm intent.
            # self.rocketmq_manager.shutdownProducer()
            logger.info("RocketMQ 连接已关闭")

        # Close the database connection pool.
        if self._connection_pool:
            await self._connection_pool.close()
            logger.info("数据库连接池已关闭")
    async def create_graph(self) -> Optional[CompiledStateGraph]:
        """Build and compile the agent workflow (lazy singleton).

        Topology: getname -> long_term_memory -> rag -> chat; chat then either
        loops through tool_call or finishes via cleanup -> END. State is
        checkpointed to PostgreSQL when a connection pool is available.

        Returns:
            Optional[CompiledStateGraph]: The configured LangGraph instance or None if init fails
        """
        if self._graph is None:
            try:
                graph_builder = StateGraph(SampleGraphState)
                graph_builder.add_node("chat", self._chat)
                graph_builder.add_node("long_term_memory",self._long_term_memory)
                graph_builder.add_node("rag",self._rag)
                graph_builder.add_node("getname", self._getname)
                graph_builder.add_node("tool_call", self._tool_call)
                graph_builder.add_node("cleanup", self._cleanup_state)  # clears transient state before END
                # chat either continues into tool execution or finishes.
                graph_builder.add_conditional_edges(
                    "chat",
                    self._should_continue,
                    {"continue": "tool_call", "end": "cleanup"},
                )

                graph_builder.add_edge("tool_call", "chat")
                graph_builder.add_edge("getname", "long_term_memory")
                graph_builder.add_edge("long_term_memory", "rag")
                graph_builder.add_edge("rag", "chat")
                graph_builder.add_edge("cleanup", END)  # finish after cleanup
                graph_builder.set_entry_point("getname")
                # graph_builder.set_finish_point("chat")

                # Checkpointing: persist graph state in PostgreSQL when possible.
                connection_pool = await self._get_connection_pool()
                if connection_pool:
                    checkpointer = AsyncPostgresSaver(connection_pool)
                    await checkpointer.setup()
                else:
                    # In production a missing pool means running without checkpoints.
                    checkpointer = None
                    if settings.ENVIRONMENT != Environment.PRODUCTION:
                        raise Exception("连接池初始化失败")

                self._graph = graph_builder.compile(
                    checkpointer=checkpointer, name=f"{settings.PROJECT_NAME} Agent ({settings.ENVIRONMENT.value})"
                )

                logger.info(
                    "graph_created",
                    graph_name=f"{settings.PROJECT_NAME} Agent",
                    environment=settings.ENVIRONMENT.value,
                    has_checkpointer=checkpointer is not None,
                )
            except Exception as e:
                logger.error("graph_creation_failed", error=str(e), environment=settings.ENVIRONMENT.value)
                # Never crash the production app over graph construction.
                if settings.ENVIRONMENT == Environment.PRODUCTION:
                    logger.warning("continuing_without_graph")
                    return None
                raise e

        return self._graph
    async def get_chat_history(self, session_id: str) -> list[Message]:
        """Get the chat history for a given thread ID.

        Args:
            session_id (str): The session ID for the conversation.

        Returns:
            list[Message]: The chat history, empty when the session has no
            saved state yet.
        """
        # Lazily build the graph so history can be read before the first run.
        if self._graph is None:
            self._graph = await self.create_graph()
        # get_state is synchronous; wrap it so it doesn't block the event loop.
        state: StateSnapshot = await sync_to_async(self._graph.get_state)(
            config={"configurable": {"thread_id": session_id}}
        )
        # Empty snapshot -> no history yet.
        if not state.values:
            return []
        # ROBUSTNESS: use .get — a snapshot may exist without a "messages"
        # key, which previously raised KeyError.
        return self.__process_messages(state.values.get("messages", []))

    def __process_messages(self, messages: list[BaseMessage]) -> list[Message]:
        """Convert raw graph messages into validated API ``Message`` objects.

        Messages are first normalized to the OpenAI dict format; only
        non-empty user/assistant turns are kept, and each is re-validated
        through the ``Message`` schema (standardized structure + validation).
        """
        keep_roles = ("assistant", "user")
        result: list[Message] = []
        for entry in convert_to_openai_messages(messages):
            # Drop tool/system turns and empty-content entries.
            if entry["role"] in keep_roles and entry["content"]:
                result.append(Message(**entry))
        return result

    async def clear_chat_history(self, session_id: str) -> None:
        """Clear all chat history for a given thread ID.

        Args:
            session_id: The ID of the session to clear history for.

        Raises:
            Exception: If there's an error clearing the chat history.
        """
        try:
            # Make sure the pool is initialized in the current event loop.
            conn_pool = await self._get_connection_pool()

            # Use a new connection for this specific operation.
            async with conn_pool.connection() as conn:
                # Checkpoint data is spread over several tables; purge each one.
                for table in settings.CHECKPOINT_TABLES:
                    try:
                        # Table names come from trusted settings (not user
                        # input); the thread_id is passed as a bound parameter.
                        await conn.execute(f"DELETE FROM {table} WHERE thread_id = %s", (session_id,))
                        logger.info(f"Cleared {table} for session {session_id}")
                    except Exception as e:
                        logger.error(f"Error clearing {table}", error=str(e))
                        raise

        except Exception as e:
            logger.error("Failed to clear chat history", error=str(e))
            raise
