"""
AI Agent核心逻辑
整合LLM、RAG检索、对话记忆、Function Calling，实现智能客服
"""
from typing import Dict, Any, Optional, AsyncIterator, Callable
from loguru import logger
import json

from app.core.llm import llm_client
from app.core.retriever import knowledge_retriever
from app.core.memory import conversation_memory
from app.core.prompts import (
    SYSTEM_PROMPT,
    GREETING_TEMPLATE,
    RAG_QUERY_WITH_HISTORY_TEMPLATE,
    format_knowledge_context,
    format_conversation_history,
)
from app.services.tools import (
    TOOLS_DEFINITION,
    search_order,
    get_user_orders,
    track_logistics,
    search_product,
    get_available_coupons,
    recommend_products,
    compare_products,
    get_product_details,
    get_price_history,
    search_product_price_history,
)
from app.db.database import db_manager


class CustomerServiceAgent:
    """E-commerce customer-service AI agent (with Function Calling support).

    Combines the LLM client, RAG knowledge retrieval, conversation memory
    and tool execution into one pipeline, exposing a non-streaming entry
    point (`chat`) and a streaming one (`chat_stream`).
    """
    
    def __init__(self) -> None:
        """Bind the agent to the module-level singleton services."""
        self.llm = llm_client
        self.retriever = knowledge_retriever
        self.memory = conversation_memory
        self.system_prompt = SYSTEM_PROMPT
        self.tools = TOOLS_DEFINITION
        
        # Tool-name -> coroutine dispatch table used by _execute_tool.
        # DB-backed tools go through a _wrap_* method so a database session
        # is injected per call; the rest are plain async functions.
        self.tool_functions: Dict[str, Callable] = {
            "search_order": self._wrap_search_order,
            "get_user_orders": self._wrap_get_user_orders,
            "track_logistics": self._wrap_track_logistics,
            "search_product": search_product,
            "get_available_coupons": get_available_coupons,
            "recommend_products": self._wrap_recommend_products,
            "compare_products": self._wrap_compare_products,
            "get_product_details": self._wrap_get_product_details,
            "get_price_history": self._wrap_get_price_history,
            "search_product_price_history": self._wrap_search_product_price_history,
        }
    
    async def _wrap_search_order(self, order_sn: str, **kwargs) -> Dict:
        """Wrap search_order, injecting a database session.

        Returning from inside the ``async for`` closes the session
        generator, which releases the session.
        """
        async for db in db_manager.get_session():
            return await search_order(order_sn, db)
    
    async def _wrap_get_user_orders(self, user_id: int, **kwargs) -> list:
        """Wrap get_user_orders, injecting a database session."""
        async for db in db_manager.get_session():
            return await get_user_orders(user_id, db, **kwargs)
    
    async def _wrap_track_logistics(self, **kwargs) -> Optional[Dict]:
        """Wrap track_logistics, injecting a database session."""
        async for db in db_manager.get_session():
            return await track_logistics(**kwargs, db=db)
    
    async def _wrap_recommend_products(self, **kwargs) -> Dict:
        """Wrap recommend_products, injecting a database session."""
        async for db in db_manager.get_session():
            return await recommend_products(**kwargs, db=db)
    
    async def _wrap_compare_products(self, product_ids: list, **kwargs) -> Dict:
        """Wrap compare_products, injecting a database session."""
        async for db in db_manager.get_session():
            return await compare_products(product_ids, db)
    
    async def _wrap_get_product_details(self, **kwargs) -> Optional[Dict]:
        """Wrap get_product_details, injecting a database session."""
        async for db in db_manager.get_session():
            return await get_product_details(**kwargs, db=db)
    
    async def _wrap_get_price_history(self, **kwargs) -> Optional[Dict]:
        """Wrap get_price_history, injecting a database session."""
        async for db in db_manager.get_session():
            return await get_price_history(**kwargs, db=db)
    
    async def _wrap_search_product_price_history(self, **kwargs) -> Optional[Dict]:
        """Wrap search_product_price_history, injecting a database session."""
        async for db in db_manager.get_session():
            return await search_product_price_history(**kwargs, db=db)
    
    async def _execute_tool(self, tool_name: str, tool_args: Dict) -> Any:
        """Look up a tool by name and run it.

        Args:
            tool_name: Tool name as declared in TOOLS_DEFINITION.
            tool_args: Keyword arguments decoded from the LLM tool call.

        Returns:
            The tool's result, or ``{"error": ...}`` for an unknown tool or
            a failure — errors are returned instead of raised so they can be
            fed back to the LLM as a tool message.
        """
        try:
            logger.info(f"执行工具: {tool_name}, 参数: {tool_args}")
            
            if tool_name not in self.tool_functions:
                return {"error": f"未知的工具: {tool_name}"}
            
            tool_func = self.tool_functions[tool_name]
            result = await tool_func(**tool_args)
            
            logger.info(f"工具执行成功: {tool_name}")
            return result
            
        except Exception as e:
            logger.error(f"工具执行失败: {tool_name}, 错误: {e}")
            return {"error": str(e)}
        
    async def _detect_order_numbers(self, message: str) -> list:
        """Extract order numbers from a user message.

        Args:
            message: Raw user message text.
            
        Returns:
            List of matched order-number strings (possibly empty).
        """
        import re
        # Order-number format: "ORD" + date + sequence — i.e. "ORD" followed
        # by at least 11 digits (e.g. ORD20251108001), case-insensitive.
        pattern = r'ORD\d{11,}'
        order_numbers = re.findall(pattern, message, re.IGNORECASE)
        return order_numbers
    
    async def chat(
        self,
        user_id: str,
        message: str,
        use_rag: bool = True,
        save_history: bool = True
    ) -> Dict[str, Any]:
        """Handle one user message (non-streaming).

        Args:
            user_id: Conversation owner.
            message: User message text.
            use_rag: Whether to run knowledge-base retrieval.
            save_history: Whether to persist the exchange to memory.
            
        Returns:
            Response dict with ``success``, ``reply``, ``used_rag``,
            ``retrieved_docs`` and, on the happy path, token/cost/duration
            metadata; on total failure ``success`` is False and ``error``
            carries the exception text.
        """
        try:
            logger.info(f"收到用户消息 | 用户: {user_id} | 消息: {message[:50]}...")
            
            # 1. Persist the raw user message (before any system hint is appended).
            if save_history:
                await self.memory.save_message(user_id, "user", message)
            
            # 🔍 Pre-check: if the message contains an order number, nudge
            # the LLM toward calling the order-lookup tool.
            detected_order_numbers = await self._detect_order_numbers(message)
            if detected_order_numbers:
                logger.info(f"检测到订单号: {detected_order_numbers}，将提示LLM调用工具")
                # Append an explicit system hint to the message text.
                message = f"{message}\n\n[系统提示：检测到订单号{', '.join(detected_order_numbers)}，请调用search_order工具查询订单详情]"
            
            # 2. RAG retrieval (when enabled).
            knowledge_context = ""
            retrieved_docs = []
            
            # 🔧 Skip RAG when an order number was detected, so retrieved
            # context cannot distract the model from the tool call.
            if use_rag and not detected_order_numbers:
                try:
                    retrieved_docs = await self.retriever.search_with_rerank(
                        query=message,
                        k=5,
                        threshold=0.2
                    )
                    
                    if retrieved_docs:
                        knowledge_context = format_knowledge_context(retrieved_docs)
                        logger.info(f"RAG检索到 {len(retrieved_docs)} 个相关文档")
                        # Debug: log snippets of the top retrieved documents.
                        for idx, doc in enumerate(retrieved_docs[:3], 1):
                            logger.debug(f"检索文档{idx} (相关度:{doc.get('relevance_score', 0):.3f}): {doc.get('content', '')[:100]}...")
                    else:
                        logger.warning(f"RAG未检索到相关文档,用户问题: {message}")
                except Exception as e:
                    logger.warning(f"RAG检索失败，降级为普通对话: {e}")
                    retrieved_docs = []  # Degrade gracefully; the rest of the flow continues without RAG.
            elif detected_order_numbers:
                logger.info("检测到订单号，跳过RAG检索，直接使用工具查询")
            
            # 3. Fetch recent conversation history.
            history = await self.memory.get_history(user_id, limit=5)
            history_text = format_conversation_history(history)
            
            # 4. Build the user prompt.
            if knowledge_context:
                # RAG-augmented prompt: knowledge context + history + question.
                user_prompt = RAG_QUERY_WITH_HISTORY_TEMPLATE.format(
                    context=knowledge_context,
                    history=history_text if history else "暂无历史",
                    question=message
                )
            else:
                # Plain conversation, no retrieval context.
                user_prompt = message
            
            # 5. Assemble the full message list (system prompt + trimmed history).
            messages = await self.memory.get_context_messages(
                user_id=user_id,
                system_prompt=self.system_prompt,
                max_history=3
            )
            
            # Append the current user turn.
            messages.append({"role": "user", "content": user_prompt})
            
            # 6. Call the LLM (Function Calling enabled).
            logger.info("调用LLM生成回复...")
            try:
                response = await self.llm.chat(messages, tools=self.tools)
            except Exception as e:
                logger.error(f"LLM主调用失败: {e}")
                # Fallback 1: retry without tools.
                try:
                    logger.warning("降级处理：不使用工具重试LLM调用")
                    response = await self.llm.chat(messages)
                except Exception as e2:
                    logger.error(f"LLM降级调用也失败: {e2}")
                    # Fallback 2: canned reply (still reported as success so
                    # the caller renders it like a normal answer).
                    return {
                        "success": True,
                        "reply": "抱歉，系统当前繁忙，请稍后重试。如有紧急问题，请联系人工客服：400-XXX-XXXX",
                        "used_rag": False,
                        "retrieved_docs": [],
                        "error": str(e)
                    }
            
            # 7. Handle any tool calls requested by the LLM.
            tool_calls = response.get("tool_calls", [])
            if tool_calls:
                logger.info(f"LLM请求调用 {len(tool_calls)} 个工具")
                
                # Echo the assistant's tool-call turn back into the transcript.
                messages.append({
                    "role": "assistant",
                    "content": response.get("content"),
                    "tool_calls": tool_calls
                })
                
                # Execute every requested tool call, in order.
                tool_execution_failed = False
                for tool_call in tool_calls:
                    tool_name = tool_call.get("function", {}).get("name")
                    tool_args_str = tool_call.get("function", {}).get("arguments", "{}")
                    tool_call_id = tool_call.get("id")
                    
                    try:
                        tool_args = json.loads(tool_args_str)
                    except json.JSONDecodeError:
                        tool_args = {}
                    
                    # Run the tool.
                    tool_result = await self._execute_tool(tool_name, tool_args)
                    
                    # Remember whether any tool reported a failure.
                    if isinstance(tool_result, dict) and "error" in tool_result:
                        logger.warning(f"工具执行失败: {tool_name} - {tool_result.get('error')}")
                        tool_execution_failed = True
                    
                    # Feed the tool result back as a "tool" message.
                    messages.append({
                        "role": "tool",
                        "tool_call_id": tool_call_id,
                        "name": tool_name,
                        "content": json.dumps(tool_result, ensure_ascii=False)
                    })
                
                # Second LLM pass: generate the final reply from tool results.
                logger.info("基于工具结果再次调用LLM...")
                try:
                    response = await self.llm.chat(messages)
                except Exception as e:
                    logger.error(f"基于工具结果的LLM调用失败: {e}")
                    # Canned reply, differentiated by whether a tool failed.
                    if tool_execution_failed:
                        assistant_reply = "抱歉，我在查询信息时遇到了问题，请稍后重试或联系人工客服。"
                    else:
                        assistant_reply = "抱歉，系统出现临时故障，请稍后重试。"
                    response = {"content": assistant_reply}
            
            assistant_reply = response.get("content", "抱歉，我暂时无法回答。")
            
            # 8. Persist the assistant reply together with usage metadata.
            if save_history:
                await self.memory.save_message(
                    user_id,
                    "assistant",
                    assistant_reply,
                    metadata={
                        "tokens": response.get("tokens"),
                        "cost": response.get("cost"),
                        "used_rag": bool(retrieved_docs),
                        "retrieved_docs_count": len(retrieved_docs)
                    }
                )
            
            # 9. Build the response payload.
            result = {
                "success": True,
                "reply": assistant_reply,
                "used_rag": bool(retrieved_docs),
                "retrieved_docs": retrieved_docs[:2] if retrieved_docs else [],  # Only the top 2 are returned.
                "tokens": response.get("tokens"),
                "cost": response.get("cost"),
                "duration": response.get("duration"),
            }
            
            logger.info(f"回复生成成功 | 用户: {user_id} | 耗时: {response.get('duration', 0):.2f}s")
            
            return result
            
        except Exception as e:
            logger.error(f"Agent处理失败: {e}", exc_info=True)
            return {
                "success": False,
                "reply": "抱歉，系统出现了一些问题，请稍后再试。",
                "error": str(e)
            }
    
    async def chat_stream(
        self,
        user_id: str,
        message: str,
        use_rag: bool = True,
        save_history: bool = True
    ) -> AsyncIterator[str]:
        """Handle one user message, streaming the reply.

        Note: unlike `chat`, this path does not pass tools to the LLM and
        does not run the order-number pre-check.

        Args:
            user_id: Conversation owner.
            message: User message text.
            use_rag: Whether to run knowledge-base retrieval.
            save_history: Whether to persist the exchange to memory.
            
        Yields:
            Reply text chunks as the LLM produces them.
        """
        try:
            logger.info(f"收到用户消息（流式） | 用户: {user_id}")
            
            # 1. Persist the user message.
            if save_history:
                await self.memory.save_message(user_id, "user", message)
            
            # 2. RAG retrieval (degrades to plain chat on failure).
            knowledge_context = ""
            if use_rag:
                try:
                    retrieved_docs = await self.retriever.search_with_rerank(
                        query=message,
                        k=5,
                        threshold=0.2
                    )
                    if retrieved_docs:
                        knowledge_context = format_knowledge_context(retrieved_docs)
                        # Debug: log snippets of the top retrieved documents.
                        for idx, doc in enumerate(retrieved_docs[:3], 1):
                            logger.debug(f"流式-检索文档{idx} (相关度:{doc.get('relevance_score', 0):.3f}): {doc.get('content', '')[:100]}...")
                except Exception as e:
                    logger.warning(f"流式模式RAG检索失败，降级为普通对话: {e}")
            
            # 3. Fetch recent conversation history.
            history = await self.memory.get_history(user_id, limit=5)
            history_text = format_conversation_history(history)
            
            # 4. Build the user prompt (RAG-augmented when context exists).
            if knowledge_context:
                user_prompt = RAG_QUERY_WITH_HISTORY_TEMPLATE.format(
                    context=knowledge_context,
                    history=history_text if history else "暂无历史",
                    question=message
                )
            else:
                user_prompt = message
            
            # 5. Assemble the message list.
            messages = await self.memory.get_context_messages(
                user_id, self.system_prompt, max_history=3
            )
            messages.append({"role": "user", "content": user_prompt})
            
            # 6. Stream the LLM reply, accumulating the full text as we go.
            full_reply = ""
            try:
                async for chunk in self.llm.chat_stream(messages):
                    full_reply += chunk
                    yield chunk
            except Exception as e:
                logger.error(f"流式LLM调用失败: {e}")
                error_msg = "\n\n抱歉，系统出现临时故障，请刷新后重试。"
                full_reply += error_msg
                yield error_msg
            
            # 7. Persist the complete reply (including any appended error text).
            if save_history and full_reply:
                await self.memory.save_message(user_id, "assistant", full_reply)
            
        except Exception as e:
            logger.error(f"Agent流式处理失败: {e}")
            yield f"\n\n抱歉，系统出现了一些问题：{str(e)}"
    
    async def get_greeting(self) -> str:
        """Return the static greeting message."""
        return GREETING_TEMPLATE
    
    async def clear_conversation(self, user_id: str) -> bool:
        """Clear the stored conversation history for a user."""
        return await self.memory.clear_history(user_id)
    
    async def get_conversation_summary(self, user_id: str) -> Optional[str]:
        """Summarize the user's conversation via the memory backend."""
        return await self.memory.summarize_conversation(user_id, self.llm)


# Global agent instance shared by the module-level convenience functions.
customer_service_agent = CustomerServiceAgent()


# Convenience functions
async def process_message(
    user_id: str,
    message: str,
    use_rag: bool = True,
    save_history: bool = True
) -> Dict[str, Any]:
    """Process a user message through the global agent (non-streaming).

    Args:
        user_id: Conversation owner.
        message: User message text.
        use_rag: Whether to run knowledge-base retrieval.
        save_history: Whether to persist the exchange to memory
            (previously hard-wired to True; now exposed, same default).

    Returns:
        The response dict produced by CustomerServiceAgent.chat.
    """
    return await customer_service_agent.chat(
        user_id, message, use_rag=use_rag, save_history=save_history
    )


async def process_message_stream(
    user_id: str,
    message: str,
    use_rag: bool = True,
    save_history: bool = True
) -> AsyncIterator[str]:
    """Process a user message through the global agent, streaming the reply.

    Args:
        user_id: Conversation owner.
        message: User message text.
        use_rag: Whether to run knowledge-base retrieval.
        save_history: Whether to persist the exchange to memory
            (previously hard-wired to True; now exposed, same default).

    Yields:
        Reply text chunks from CustomerServiceAgent.chat_stream.
    """
    async for chunk in customer_service_agent.chat_stream(
        user_id, message, use_rag=use_rag, save_history=save_history
    ):
        yield chunk

