# memory/enhanced_rag_retriever.py
# Enhanced RAG retriever with intelligent retrieval across three memory types
# (short-term, long-term, temporal).

import asyncio
from typing import List, Dict, Any, Optional, Tuple
from datetime import datetime, timedelta
from .memory_manager import EnhancedMemoryManager, EnhancedMemoryNode, MemoryType, MemoryImportance
from .local_embedding_client import LocalEmbeddingClient


class EnhancedRAGRetriever:
    """Enhanced RAG retriever.

    Retrieves relevant information from the three enhanced memory stores
    (short-term, long-term and temporal) and assembles it into context
    dictionaries and prompt-ready text for an LLM.
    """

    # Punctuation stripped from queries before keyword matching, so that
    # e.g. "我叫什么？" matches the keyword "我叫什么" (covers both the
    # full-width and half-width question mark / period).
    _PUNCTUATION_TABLE = str.maketrans("", "", "？?。.")

    # Keywords that identify a time-related query.
    _TEMPORAL_KEYWORDS = [
        "昨天", "前天", "今天", "上周", "上个月", "去年", "最近", "之前", "以前",
        "什么时候", "时间", "日期", "吃了什么", "做了什么", "去了哪里",
    ]

    # Keywords that identify a factual / personal-information query.
    # (Extended to cover more personal-info lookups; a duplicated
    # "你是什么" entry has been removed.)
    _FACTUAL_KEYWORDS = [
        "我叫什么", "我的名字", "我叫", "我的姓名", "我是谁", "我的身份",
        "我喜欢", "我不喜欢", "我的爱好", "我的职业", "我的工作",
        "我的年龄", "我的生日", "我的家乡", "我的学校", "我的公司",
        "你叫什么", "你的名字", "你是谁", "你的身份", "你的职业",
        "你是什么", "你是什么人", "你是什么身份",
    ]

    # Heuristic trigger phrases: if any occurs in a (normalized) query,
    # memory retrieval is likely to be useful.  Used by should_use_memory().
    _MEMORY_TRIGGERS = [
        # Time-related
        "之前", "以前", "刚才", "刚才说", "之前问", "之前提到", "昨天", "前天", "上周", "上个月",
        "什么时候", "时间", "日期", "吃了什么", "做了什么", "去了哪里",
        # Memory-related
        "记得", "记住", "回忆", "历史", "之前讨论", "之前聊", "说过", "提到过",
        # Conversational continuity
        "继续", "接着", "然后", "还有", "另外", "补充", "再说", "继续聊",
        # Personal information (punctuation is stripped before matching,
        # so queries ending in question marks still match)
        "我叫什么", "我的名字", "我叫", "我的姓名", "我是谁", "我的身份",
        "我喜欢", "我不喜欢", "我的爱好", "我的职业", "我的工作",
        "我的年龄", "我的生日", "我的家乡", "我的学校", "我的公司",
        "你叫什么", "你的名字", "你是谁", "你的身份",
    ]

    def __init__(self, memory_manager: "EnhancedMemoryManager",
                 embedding_client: "LocalEmbeddingClient"):
        """
        Args:
            memory_manager: Store providing search over the three memory types.
            embedding_client: Embedding backend (stored for future use; this
                class does not call it directly).
        """
        # NOTE: the annotations above are string forward references so the
        # class body does not evaluate the project types at definition time.
        self.memory_manager = memory_manager
        self.embedding_client = embedding_client

        # RAG configuration parameters.
        self.max_context_length = 4000    # hard cap on formatted context length
        self.similarity_threshold = 0.4   # lowered to improve retrieval recall
        self.max_retrieved_memories = 10  # cap on memories returned per query
        self.context_depth = 2            # depth for contextual memory lookup

    async def retrieve_relevant_context(self, query: str, include_metadata: bool = True) -> Dict[str, Any]:
        """Retrieve context relevant to *query*.

        The query is first classified (temporal / factual / general), then the
        matching retrieval strategy is applied and retrieval statistics are
        attached to the result.

        Args:
            query: The user's query.
            include_metadata: Accepted for interface compatibility; the
                returned context currently always includes metadata.

        Returns:
            Context dict containing at least ``relevant_memories``,
            ``context_summary``, ``memory_timeline`` and ``retrieval_stats``.
        """
        query_type = self._analyze_query_type(query)

        if query_type == "temporal":
            context = await self._retrieve_temporal_context(query)
        elif query_type == "factual":
            # Factual queries favour long-term memory.
            context = await self._retrieve_factual_context(query)
        else:
            # General queries use mixed retrieval across all memory types.
            context = await self._retrieve_mixed_context(query)

        context["retrieval_stats"] = {
            "query": query,
            "query_type": query_type,
            "timestamp": datetime.now().isoformat(),
        }
        return context

    def _normalize_query(self, query: str) -> str:
        """Lower-case *query* and strip question/period punctuation so keyword
        matching is insensitive to trailing punctuation."""
        return query.lower().translate(self._PUNCTUATION_TABLE).strip()

    def _analyze_query_type(self, query: str) -> str:
        """Classify a query by keyword matching.

        Args:
            query: The user's query.

        Returns:
            One of ``"temporal"``, ``"factual"`` or ``"general"``.
            Temporal keywords take precedence over factual ones.
        """
        query_clean = self._normalize_query(query)

        if any(keyword in query_clean for keyword in self._TEMPORAL_KEYWORDS):
            return "temporal"
        if any(keyword in query_clean for keyword in self._FACTUAL_KEYWORDS):
            return "factual"
        return "general"

    @staticmethod
    def _memory_info(memory: "EnhancedMemoryNode", **extra: Any) -> Dict[str, Any]:
        """Serialize a memory node into the dict shape used in context lists.

        Extra keyword arguments (e.g. ``similarity``, ``source``,
        ``time_reference``) are merged into the result.
        """
        info = {
            "id": memory.id,
            "content": memory.content,
            "type": memory.memory_type.value,
            "timestamp": memory.timestamp,
            "importance": memory.importance.value,
        }
        info.update(extra)
        return info

    @staticmethod
    def _timeline_entry(memory: "EnhancedMemoryNode", entry_type: str) -> Dict[str, Any]:
        """Build a timeline entry with the content truncated to 100 chars."""
        content = memory.content
        preview = content[:100] + "..." if len(content) > 100 else content
        return {"timestamp": memory.timestamp, "content": preview, "type": entry_type}

    async def _retrieve_temporal_context(self, query: str) -> Dict[str, Any]:
        """Build context for a time-related query.

        Combines dedicated temporal memories with similarity hits from the
        short- and long-term stores.
        """
        # 1. Dedicated temporal-memory search.
        temporal_memories = await self.memory_manager.search_temporal_memories(query)

        # 2. Supplementary similarity search over short/long-term memories.
        similar_memories = await self.memory_manager.search_memories(
            query,
            memory_types=[MemoryType.SHORT_TERM, MemoryType.LONG_TERM],
            limit=5,
            min_similarity=self.similarity_threshold,  # single configurable threshold
        )

        # 3. Assemble the context structure.
        context: Dict[str, Any] = {
            "relevant_memories": [],
            "temporal_memories": [],
            "context_summary": "",
            "memory_timeline": [],
            "key_concepts": set(),
        }

        for memory in temporal_memories:
            context["temporal_memories"].append(self._memory_info(
                memory, time_reference=memory.metadata.get('time_reference', '')))
            context["memory_timeline"].append(self._timeline_entry(memory, "temporal"))

        for memory, similarity in similar_memories:
            context["relevant_memories"].append(self._memory_info(memory, similarity=similarity))
            context["memory_timeline"].append(
                self._timeline_entry(memory, memory.memory_type.value))

        context["context_summary"] = self._generate_temporal_summary(context)
        context["key_concepts"] = list(context["key_concepts"])
        return context

    async def _retrieve_factual_context(self, query: str) -> Dict[str, Any]:
        """Build context for a factual query, preferring long-term memory."""
        # 1. Long-term memories carry the authoritative factual information.
        long_term_memories = await self.memory_manager.search_memories(
            query,
            memory_types=[MemoryType.LONG_TERM],
            limit=5,
            min_similarity=self.similarity_threshold,
        )

        # 2. Short-term memories supply supplementary context.
        short_term_memories = await self.memory_manager.search_memories(
            query,
            memory_types=[MemoryType.SHORT_TERM],
            limit=3,
            min_similarity=self.similarity_threshold,
        )

        # 3. Assemble the context structure.
        context: Dict[str, Any] = {
            "relevant_memories": [],
            "factual_memories": [],
            "context_summary": "",
            "memory_timeline": [],
            "key_concepts": set(),
        }

        for memory, similarity in long_term_memories:
            context["factual_memories"].append(self._memory_info(memory, similarity=similarity))
            context["memory_timeline"].append(self._timeline_entry(memory, "factual"))

        for memory, similarity in short_term_memories:
            context["relevant_memories"].append(self._memory_info(memory, similarity=similarity))
            context["memory_timeline"].append(self._timeline_entry(memory, "context"))

        context["context_summary"] = self._generate_factual_summary(context)
        context["key_concepts"] = list(context["key_concepts"])
        return context

    async def _retrieve_mixed_context(self, query: str) -> Dict[str, Any]:
        """Build context for a general query using all memory types."""
        # 1. Similarity search across every memory type.
        all_memories = await self.memory_manager.search_memories(
            query,
            memory_types=None,  # no type filter
            limit=self.max_retrieved_memories,
            min_similarity=self.similarity_threshold,
        )

        # 2. Memories reachable through contextual links.
        contextual_memories = await self.memory_manager.get_contextual_memories(
            query,
            context_depth=self.context_depth,
        )

        # 3. Merge the two result sets and drop duplicates.
        merged_memories = self._merge_and_deduplicate_memories(all_memories, contextual_memories)

        # 4. Assemble the context structure.
        context: Dict[str, Any] = {
            "relevant_memories": [],
            "context_summary": "",
            "memory_timeline": [],
            "key_concepts": set(),
        }

        # Group serialized memories by their memory type.
        memory_by_type: Dict[str, List[Dict[str, Any]]] = {
            MemoryType.SHORT_TERM.value: [],
            MemoryType.LONG_TERM.value: [],
            MemoryType.TEMPORAL.value: [],
        }

        for memory, similarity, source in merged_memories:
            memory_info = self._memory_info(memory, similarity=similarity, source=source)
            context["relevant_memories"].append(memory_info)
            memory_by_type[memory.memory_type.value].append(memory_info)
            context["memory_timeline"].append(
                self._timeline_entry(memory, memory.memory_type.value))

            # Long-term and temporal memory contents double as key concepts.
            if memory.memory_type in (MemoryType.LONG_TERM, MemoryType.TEMPORAL):
                context["key_concepts"].add(memory.content)

        context["memories_by_type"] = memory_by_type
        context["context_summary"] = self._generate_mixed_summary(context)
        context["key_concepts"] = list(context["key_concepts"])
        return context

    def _merge_and_deduplicate_memories(
            self,
            similar_memories: List[Tuple["EnhancedMemoryNode", float]],
            contextual_memories: List["EnhancedMemoryNode"],
    ) -> List[Tuple["EnhancedMemoryNode", float, str]]:
        """Merge similarity and contextual hits, deduplicating by memory id.

        Similarity hits win over contextual hits for the same id.  Contextual
        memories receive a default score of 0.5 because no similarity was
        computed for them.

        Returns:
            Up to ``max_retrieved_memories`` tuples of
            ``(memory, score, source)`` sorted by descending score, where
            *source* is ``"similarity"`` or ``"context"``.
        """
        memory_dict: Dict[Any, Tuple["EnhancedMemoryNode", float, str]] = {}

        for memory_node, similarity in similar_memories:
            memory_dict[memory_node.id] = (memory_node, similarity, "similarity")

        for memory_node in contextual_memories:
            if memory_node.id not in memory_dict:
                memory_dict[memory_node.id] = (memory_node, 0.5, "context")  # default score

        merged_memories = sorted(memory_dict.values(), key=lambda x: x[1], reverse=True)
        return merged_memories[:self.max_retrieved_memories]

    def _generate_temporal_summary(self, context: Dict[str, Any]) -> str:
        """Summarize a temporal-retrieval context into one display string."""
        temporal_memories = context.get("temporal_memories", [])
        relevant_memories = context.get("relevant_memories", [])

        if not temporal_memories and not relevant_memories:
            return "没有找到相关的时间记忆。"

        summary_parts = []

        if temporal_memories:
            summary_parts.append(f"找到 {len(temporal_memories)} 条时间相关记忆:")
            for memory in temporal_memories[:3]:  # show at most 3
                time_ref = memory.get('time_reference', '')
                content = memory['content']
                summary_parts.append(f"- {time_ref}: {content}")

        if relevant_memories:
            summary_parts.append(f"还有 {len(relevant_memories)} 条相关记忆。")

        return " | ".join(summary_parts)

    def _generate_factual_summary(self, context: Dict[str, Any]) -> str:
        """Summarize a factual-retrieval context into one display string."""
        factual_memories = context.get("factual_memories", [])
        relevant_memories = context.get("relevant_memories", [])

        if not factual_memories and not relevant_memories:
            return "没有找到相关的事实记忆。"

        summary_parts = []

        if factual_memories:
            summary_parts.append(f"用户个人信息:")
            for memory in factual_memories[:3]:  # show at most 3
                summary_parts.append(f"• {memory['content']}")

        if relevant_memories:
            summary_parts.append(f"其他相关信息:")
            for memory in relevant_memories[:2]:  # show at most 2
                summary_parts.append(f"• {memory['content']}")

        return "\n".join(summary_parts)

    def _generate_mixed_summary(self, context: Dict[str, Any]) -> str:
        """Summarize a mixed-retrieval context into one display string.

        NOTE: sorts ``context["relevant_memories"]`` in place by similarity,
        so the returned context exposes the same ordering.
        """
        relevant_memories = context.get("relevant_memories", [])
        memories_by_type = context.get("memories_by_type", {})

        if not relevant_memories:
            return "没有找到相关的历史记忆。"

        # In-place sort: callers see relevant_memories ordered by similarity.
        relevant_memories.sort(key=lambda x: x.get("similarity", 0), reverse=True)

        summary_parts = []

        for memory in relevant_memories[:3]:  # most relevant first
            summary_parts.append(f"[{memory['type']}] {memory['content']}")

        if len(relevant_memories) > 3:
            summary_parts.append(f"还有 {len(relevant_memories) - 3} 条相关记忆。")

        # Per-type counts, skipping empty buckets.
        type_counts = {k: len(v) for k, v in memories_by_type.items() if v}
        if type_counts:
            type_summary = ", ".join([f"{k}: {v}条" for k, v in type_counts.items()])
            summary_parts.append(f"记忆类型分布: {type_summary}")

        return " | ".join(summary_parts)

    async def format_context_for_llm(self, context: Dict[str, Any], max_length: Optional[int] = None) -> str:
        """Format a retrieval context as prompt text for the LLM.

        Args:
            context: Context dict as produced by retrieve_relevant_context().
            max_length: Length cap; defaults to ``self.max_context_length``.

        Returns:
            Newline-joined context string, truncated (with a trailing "...")
            when it exceeds *max_length*.
        """
        if max_length is None:
            max_length = self.max_context_length

        formatted_parts = []

        if context.get("context_summary"):
            formatted_parts.append(f"相关历史记忆: {context['context_summary']}")

        if context.get("key_concepts"):
            concepts = ", ".join(context["key_concepts"][:5])  # cap concept count
            formatted_parts.append(f"关键概念: {concepts}")

        # Detail section depends on how the query was classified.
        query_type = context.get("retrieval_stats", {}).get("query_type", "general")

        if query_type == "temporal":
            temporal_memories = context.get("temporal_memories", [])
            if temporal_memories:
                formatted_parts.append("时间相关记忆详情:")
                for memory in temporal_memories[:3]:
                    time_ref = memory.get('time_reference', '')
                    formatted_parts.append(
                        f"- {time_ref} ({memory['timestamp']}): {memory['content']}")

        elif query_type == "factual":
            factual_memories = context.get("factual_memories", [])
            if factual_memories:
                formatted_parts.append("用户个人信息:")
                for memory in factual_memories[:3]:
                    formatted_parts.append(f"• {memory['content']}")
            else:
                # No factual hits: fall back to generally relevant memories.
                relevant_memories = context.get("relevant_memories", [])
                if relevant_memories:
                    formatted_parts.append("相关记忆:")
                    for memory in relevant_memories[:2]:
                        formatted_parts.append(f"• {memory['content']}")

        else:
            relevant_memories = context.get("relevant_memories", [])
            if relevant_memories:
                formatted_parts.append("详细相关记忆:")
                for memory in relevant_memories[:3]:  # top 3 most relevant
                    similarity = memory.get("similarity", 0)
                    formatted_parts.append(
                        f"- [{memory['type']}] (相似度:{similarity:.2f}, "
                        f"{memory['timestamp']}): {memory['content']}")

        formatted_context = "\n".join(formatted_parts)

        # Enforce the length cap.
        if len(formatted_context) > max_length:
            formatted_context = formatted_context[:max_length] + "..."

        return formatted_context

    async def should_use_memory(self, query: str) -> bool:
        """Heuristically decide whether memory retrieval is worthwhile.

        Args:
            query: The user's query.

        Returns:
            True if the normalized query contains any trigger phrase.
        """
        query_clean = self._normalize_query(query)
        return any(trigger in query_clean for trigger in self._MEMORY_TRIGGERS)

    async def get_memory_insights(self, query: str) -> Dict[str, Any]:
        """Run a full retrieval and distill it into insight metadata.

        Args:
            query: The user's query.

        Returns:
            Dict with hit counts, summary, key concepts, retrieval stats and
            a usage recommendation.
        """
        context = await self.retrieve_relevant_context(query)

        relevant = context.get("relevant_memories", [])
        return {
            "has_relevant_memory": len(relevant) > 0,
            "memory_count": len(relevant),
            "context_summary": context.get("context_summary", ""),
            "key_concepts": context.get("key_concepts", []),
            "retrieval_stats": context.get("retrieval_stats", {}),
            "recommendation": self._generate_memory_recommendation(context),
        }

    def _generate_memory_recommendation(self, context: Dict[str, Any]) -> str:
        """Produce advice on how retrieved memories should be used.

        Args:
            context: Context dict as produced by retrieve_relevant_context().

        Returns:
            Human-readable recommendation text.
        """
        query_type = context.get("retrieval_stats", {}).get("query_type", "general")
        memory_count = len(context.get("relevant_memories", []))

        if memory_count == 0:
            return "没有找到相关历史记忆，可以正常回答用户问题。"

        if query_type == "temporal":
            temporal_count = len(context.get("temporal_memories", []))
            if temporal_count > 0:
                return f"找到 {temporal_count} 条时间相关记忆，建议重点引用这些时间信息。"
            return f"找到 {memory_count} 条相关记忆，建议在回答中引用这些历史信息。"

        if query_type == "factual":
            factual_count = len(context.get("factual_memories", []))
            if factual_count > 0:
                return f"找到 {factual_count} 条事实记忆，建议优先使用这些准确的事实信息。"
            return f"找到 {memory_count} 条相关记忆，建议在回答中引用这些历史信息。"

        if memory_count <= 3:
            return f"找到 {memory_count} 条相关记忆，建议在回答中引用这些历史信息。"
        return f"找到 {memory_count} 条相关记忆，建议重点引用最相关的几条记忆。"