from typing import List, Dict, Any, AsyncGenerator, Optional
import logging

from langchain.agents import AgentExecutor, create_react_agent
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import HumanMessage, AIMessage
from langchain_core.tools import BaseTool
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.outputs import LLMResult
from langchain_core.agents import AgentAction, AgentFinish

from app.services.llm.prompt_templates import REASON_ACT_PROMPT
from app.services.tools.memory_manager import get_memory_manager

# 创建专用的logger
logger = logging.getLogger(__name__)

class DetailedCallbackHandler(BaseCallbackHandler):
    """Verbose callback handler that traces LangChain execution.

    Every lifecycle hook (LLM, agent, tool, chain) is logged at INFO level so
    the whole Reason-Act loop can be reconstructed from the log stream.  Log
    arguments are passed lazily (%-style) instead of via f-strings so the
    potentially large payloads are only formatted when INFO logging is
    actually enabled.
    """

    def on_llm_start(self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any) -> None:
        """Log the prompts and model config when an LLM call begins."""
        logger.info("🚀 LLM开始执行")
        logger.info("📝 发送的提示词: %s", prompts)
        logger.info("🔧 LLM配置: %s", serialized)

    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        """Log the generations when an LLM call completes."""
        logger.info("✅ LLM执行完成")
        logger.info("📤 LLM响应: %s", response.generations)

    def on_llm_error(self, error: Exception, **kwargs: Any) -> None:
        """Log an LLM failure."""
        logger.error("❌ LLM执行出错: %s", error)

    def on_agent_action(self, action: AgentAction, **kwargs: Any) -> None:
        """Log the chosen tool, its input, and the reasoning for an agent step."""
        logger.info("🤖 代理执行动作: %s", action.tool)
        logger.info("📋 动作输入: %s", action.tool_input)
        logger.info("💭 代理思考过程: %s", action.log)

    def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
        """Log the agent's final return values and full transcript."""
        logger.info("🏁 代理执行完成")
        logger.info("📋 最终输出: %s", finish.return_values)
        logger.info("💭 完整日志: %s", finish.log)

    def on_tool_start(self, serialized: Dict[str, Any], input_str: str, **kwargs: Any) -> None:
        """Log the tool name and raw input string when a tool starts."""
        # `serialized` can be None for some runnables — guard before .get().
        logger.info("🔧 工具开始执行: %s", (serialized or {}).get("name", "Unknown"))
        logger.info("📥 工具输入: %s", input_str)

    def on_tool_end(self, output: str, **kwargs: Any) -> None:
        """Log a tool's output when it finishes."""
        logger.info("✅ 工具执行完成")
        logger.info("📤 工具输出: %s", output)

    def on_tool_error(self, error: Exception, **kwargs: Any) -> None:
        """Log a tool failure."""
        logger.error("❌ 工具执行出错: %s", error)

    def on_text(self, text: str, **kwargs: Any) -> None:
        """Log free-form text emitted during the run."""
        logger.info("📄 处理文本: %s", text)

    def on_chain_start(self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any) -> None:
        """Log the chain name and its inputs when a chain starts."""
        # `serialized` can be None for some runnables — guard before .get().
        logger.info("⛓️ 链开始执行: %s", (serialized or {}).get("name", "Unknown"))
        logger.info("📥 链输入: %s", inputs)

    def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
        """Log a chain's outputs when it finishes."""
        logger.info("✅ 链执行完成")
        logger.info("📤 链输出: %s", outputs)

    def on_chain_error(self, error: Exception, **kwargs: Any) -> None:
        """Log a chain failure."""
        logger.error("❌ 链执行出错: %s", error)


class ToolAgent:
    """Tool agent that runs a ReAct (Reason-Act) loop to pick and invoke tools.

    Wraps a LangChain ``create_react_agent`` and builds a fresh
    ``AgentExecutor`` per request so that per-session memory and callbacks
    stay request-scoped.
    """

    def __init__(self, llm: BaseChatModel, tools: List[BaseTool]):
        """Initialize the tool agent.

        Args:
            llm: Chat language model driving the ReAct loop.
            tools: Tools the agent may choose from.
        """
        self.llm = llm
        self.tools = tools

        # Verbose callback handler used to trace every executor run.
        self.callback_handler = DetailedCallbackHandler()

        logger.info("🔧 初始化ToolAgent，工具数量: %d", len(tools))
        logger.info("🛠️ 可用工具: %s", [tool.name for tool in tools])

        self.agent = create_react_agent(
            llm=self.llm,
            tools=self.tools,
            prompt=REASON_ACT_PROMPT,
        )
        # Name -> tool lookup kept for callers; not used by astream_run itself.
        self.tools_dict = {tool.name: tool for tool in tools}
        self.memory_manager = get_memory_manager()

    async def astream_run(self, user_input: str, session_id: Optional[str] = None) -> AsyncGenerator[
        Dict[str, Any], None]:
        """Stream the agent's response for one user turn.

        Yields ``{"content": str, "type": "llm_stream"}`` chunks — first the
        thinking process, then the final answer with the ``Final Answer:``
        marker stripped — or ``{"content": ..., "type": "error"}`` with a
        fallback message if execution fails.

        Args:
            user_input: The user's message.
            session_id: Optional key for per-session conversation memory;
                ``None`` disables memory.
        """
        # Marker the ReAct prompt emits right before the agent's final reply.
        marker = "Final Answer:"
        try:
            logger.info("🚀 开始执行代理任务")
            logger.info("📥 用户输入: %s", user_input)
            logger.info("🔑 会话ID: %s", session_id)

            # Look up per-session memory (None when no session is given).
            memory = self.memory_manager.get_memory(session_id) if session_id else None
            if memory:
                logger.info("💾 加载会话记忆，历史消息数量: %d", len(memory.chat_memory.messages))
            else:
                logger.info("💾 无会话记忆")

            # Fresh executor per request, wired with memory and callbacks.
            executor = AgentExecutor(
                agent=self.agent,
                tools=self.tools,
                verbose=True,
                handle_parsing_errors=True,  # re-prompt instead of raising on malformed LLM output
                max_iterations=5,            # hard cap on Reason-Act cycles
                memory=memory,
                callbacks=[self.callback_handler]
            )

            # Only "input" is passed; AgentExecutor injects scratchpad/history itself.
            inputs = {
                "input": user_input
            }
            logger.info("📋 准备执行，输入参数: %s", inputs)

            # Debug aid: dump the conversation history that memory will inject.
            if memory:
                memory_vars = memory.load_memory_variables({})
                history = memory_vars.get("history", [])
                parts = ["\n"]
                for message in history:
                    # isinstance (not `type(...) ==`) so message subclasses match too.
                    if isinstance(message, HumanMessage):
                        parts.append(f"Human: {message.content}\n")
                    elif isinstance(message, AIMessage):
                        parts.append(f"AI: {message.content}\n")
                logger.info("💾 当前会话历史: %s", "".join(parts))

            full_output = ""           # everything the LLM streamed this turn
            accumulated_content = ""   # same content, used for marker detection
            final_answer_started = False

            # Use the astream_events API for true token-level streaming.
            logger.info("🔄 开始流式执行")
            event_count = 0
            async for event in executor.astream_events(inputs, version="v1"):
                event_count += 1
                kind = event["event"]
                logger.info("📡 事件 #%d: %s - %s", event_count, kind, event.get("name", "N/A"))

                # Only chat-model token chunks are forwarded downstream.
                if kind != "on_chat_model_stream":
                    continue
                content = event["data"]["chunk"].content
                if not content:
                    continue

                logger.info("💬 LLM流式输出: %r", content)
                accumulated_content += content
                full_output += content
                logger.info("📊 累积内容长度: %d, 完整输出长度: %d",
                            len(accumulated_content), len(full_output))

                if marker in accumulated_content and not final_answer_started:
                    # First chunk in which the complete marker is visible:
                    # emit everything after it, marker and leading whitespace stripped.
                    final_answer_started = True
                    final_answer_index = accumulated_content.find(marker)
                    logger.info("🎯 检测到Final Answer标记，位置: %d", final_answer_index)
                    answer_content = accumulated_content[final_answer_index + len(marker):].lstrip(' \n\r\t')
                    if answer_content:
                        logger.info("📤 输出Final Answer内容: %r", answer_content)
                        yield {"content": answer_content, "type": "llm_stream"}
                elif final_answer_started:
                    # Already inside the final answer: forward chunks verbatim.
                    logger.info("📤 继续输出Final Answer内容: %r", content)
                    yield {"content": content, "type": "llm_stream"}
                elif any(token in content for token in ("Final", "Answer", ":")):
                    # Chunk may be a partial marker; withhold it until the full
                    # "Final Answer:" string can be recognized.  (The original
                    # genexp filter on accumulated_content was always true in
                    # this branch and has been dropped.)
                    logger.info("🔍 跳过Final Answer标记的一部分: %r", content)
                else:
                    # Plain reasoning text: stream it as the thinking process.
                    logger.info("💭 输出思考过程: %r", content)
                    yield {"content": content, "type": "llm_stream"}

            logger.info("✅ 代理任务执行完成，总共处理 %d 个事件", event_count)

            # Persist this turn to memory once the stream ends; only the final
            # answer is stored so later prompts aren't polluted with reasoning.
            if memory and full_output:
                if final_answer_started and marker in full_output:
                    final_answer_index = full_output.find(marker)
                    final_answer_only = full_output[final_answer_index + len(marker):].lstrip(' \n\r\t')
                    memory.save_context({"input": user_input}, {"output": final_answer_only})
                    logger.info("💾 保存Final Answer到记忆: %r", final_answer_only)
                else:
                    # No Final Answer produced (e.g. iteration cap hit): keep everything.
                    memory.save_context({"input": user_input}, {"output": full_output})
                    logger.info("💾 保存完整输出到记忆: %r", full_output)

        except Exception as e:
            # exc_info=True already logs the full traceback; the previous
            # duplicate print()/traceback dump to stdout was removed.
            logger.error("❌ 代理执行出错: %s", e, exc_info=True)
            yield {"content": "老朽不知如何解答此问。", "type": "error"}
